Bladeren bron

增加测试模型,训练验证代码

liyan 1 jaar geleden
bovenliggende
commit
53fab89518
4 gewijzigde bestanden met toevoegingen van 295 en 0 verwijderingen
  1. 1 0
      model/.keep
  2. 76 0
      model/Alexnet.py
  3. 161 0
      train.py
  4. 57 0
      val.py

+ 1 - 0
model/.keep

@@ -0,0 +1 @@
+该目录存放待测试的模型定义文件

+ 76 - 0
model/Alexnet.py

@@ -0,0 +1,76 @@
+import torch
+import torch.nn as nn
+
+
class Alexnet(nn.Module):
    """Compact AlexNet-style CNN for small images (e.g. CIFAR-10).

    Args:
        input_channels: number of channels in the input images.
        output_num: number of output classes (size of the logit vector).
        input_size: height/width of the (square) inputs; used to size the
            first fully connected layer by probing the feature extractor.
    """

    def __init__(self, input_channels, output_num, input_size):
        super().__init__()

        self.features = nn.Sequential(
            nn.Conv2d(in_channels=input_channels, out_channels=64, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(64),  # batch normalization
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),

            nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),  # batch normalization
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),

            nn.Conv2d(in_channels=192, out_channels=384, kernel_size=3, padding=1),
            nn.BatchNorm2d(384),  # batch normalization
            nn.ReLU(inplace=True),

            nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),  # batch normalization
            nn.ReLU(inplace=True),

            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),  # batch normalization
            nn.MaxPool2d(kernel_size=2),
            nn.ReLU(inplace=True),
        )

        self.input_size = input_size
        self.input_channels = input_channels  # remembered so the probe below matches the conv stack
        self._init_classifier(output_num)

    def _init_classifier(self, output_num):
        """Build the fully connected head, sizing its first layer by probing `features`."""
        with torch.no_grad():
            # Forward a dummy input through the feature extractor to learn the
            # flattened feature size.
            # BUG FIX: the dummy input previously hard-coded 3 channels, which
            # crashed whenever input_channels != 3 (the first conv expects
            # `input_channels` channels).
            dummy_input = torch.zeros(1, self.input_channels, self.input_size, self.input_size)
            features_size = self.features(dummy_input).numel()

        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(features_size, 1000),
            nn.ReLU(inplace=True),

            nn.Dropout(0.5),
            nn.Linear(1000, 256),
            nn.ReLU(inplace=True),

            nn.Linear(256, output_num)
        )

    def forward(self, x):
        """Return class logits of shape (batch, output_num) for image batch `x`."""
        x = self.features(x)
        x = x.reshape(x.size(0), -1)  # flatten everything except the batch dimension
        x = self.classifier(x)
        return x
+
+
if __name__ == '__main__':
    # Smoke test: build the model from CLI arguments and push one random
    # batch through it, printing the architecture and the output shape.
    import argparse

    cli = argparse.ArgumentParser(description='AlexNet Implementation')
    for flag, value in (('--input_channels', 3), ('--output_num', 10), ('--input_size', 32)):
        cli.add_argument(flag, default=value, type=int)
    opts = cli.parse_args()

    net = Alexnet(opts.input_channels, opts.output_num, opts.input_size)
    sample = torch.rand(1, opts.input_channels, opts.input_size, opts.input_size)
    out = net(sample)

    print(net)
    print("Predictions shape:", out.shape)

+ 161 - 0
train.py

@@ -0,0 +1,161 @@
+"""
+示例代码,演示白盒水印编码器与解码器的使用
+"""
+import os
+
+import torch
+import torch.nn as nn
+import torchvision
+import torchvision.transforms as transforms
+from matplotlib import pyplot as plt
+from torch import optim
+from tqdm import tqdm  # 导入tqdm
+from model.Alexnet import Alexnet
+from watermark_codec import ModelEncoder
+from watermark_codec.tool import secret_func
+
+# 参数
+batch_size = 500
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+num_epochs = 40
+wm_length = 1024
+num_workers = 2
+
+# 设置随机数种子
+# np.random.seed(1)
+# lambda1 = 0.05
+# b = np.random.randint(low=0, high=2, size=(1, wm_length))  # 生成模拟随机密钥
+# np.save('b.npy', b)
+# b = nn.Parameter(torch.tensor(b, dtype=torch.float32).to(device), requires_grad=False)
+# b.requires_grad = False
+# 存储路径
+model_path = './run/train/alex_net.pt'
+key_path = './run/train/key.pt'
+os.makedirs(os.path.dirname(model_path), exist_ok=True)
+os.makedirs(os.path.dirname(key_path), exist_ok=True)
+
+# 数据预处理和加载
+transform_train = transforms.Compose([
+    transforms.RandomCrop(32, padding=4),
+    transforms.RandomHorizontalFlip(),
+    transforms.ToTensor(),
+    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+])
+
+transform_test = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+])
+
+trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
+trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+
+testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
+testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=num_workers)
+
+# 创建AlexNet模型实例
+model = Alexnet(3, 10, 32).to(device)
+print(model)
+
+# 获取模型中待嵌入的卷积层
+conv_list = []
+for module in model.modules():
+    if isinstance(module, nn.Conv2d):
+        conv_list.append(module)
+conv_list = conv_list[0:2]
+
+# 创建模型水印编码器
+secret = secret_func.get_secret(512)
+encoder = ModelEncoder(layers=conv_list, secret=secret, key_path=key_path, device='cuda')
+
+# 定义目标模型损失函数和优化器
+criterion = nn.CrossEntropyLoss()
+
+# 目标模型使用Adam优化器
+optimizer = optim.Adam(model.parameters(), lr=1e-4)  # 调整学习率
+
+# 初始化空列表以存储准确度和损失
+train_accs = []
+train_losses = []
+torch.autograd.set_detect_anomaly(True)
+
+for epoch in range(num_epochs):
+    model.train()
+    running_loss = 0.0
+    correct = 0
+    total = 0
+
+    # 使用tqdm创建进度条
+    with tqdm(total=len(trainloader), desc=f"Epoch {epoch + 1}", unit="batch") as pbar:
+        for i, data in enumerate(trainloader, 0):
+            inputs, labels = data
+            inputs, labels = inputs.to(device), labels.to(device)
+
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, labels)
+            # loss = encoder.get_loss(loss)  # 实际应用只调用get_loss修改原损失即可
+            # 测试时可以获取白盒水印的损失并打印 ------------------------------
+            loss_embeder = encoder.get_embeder_loss()
+            loss += loss_embeder
+            # -----------------------------------------------------------
+            loss.backward()
+            optimizer.step()
+
+            running_loss += loss.item()
+
+            _, predicted = torch.max(outputs.data, 1)
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+            # 更新进度条
+            pbar.set_postfix(loss=running_loss / (i + 1), loss_embeder=loss_embeder.item(), acc=100 * correct / total)
+            pbar.update()
+
+        # 计算准确度和损失
+        epoch_acc = 100 * correct / total
+        epoch_loss = running_loss / len(trainloader)
+
+        # 记录准确度和损失值
+        train_accs.append(epoch_acc)
+        train_losses.append(epoch_loss)
+        print(f"Epoch {epoch + 1}, Loss: {epoch_loss}, Accuracy: {epoch_acc}%")
+
+        torch.save(model.state_dict(), model_path)
+
+    # 测试模型
+    if epoch % 5 == 4:
+        model.eval()
+        correct = 0
+        total = 0
+        with torch.no_grad():
+            for data in testloader:
+                inputs, labels = data
+                inputs, labels = inputs.to(device), labels.to(device)
+                outputs = model(inputs)
+                _, predicted = torch.max(outputs.data, 1)
+                total += labels.size(0)
+                correct += (predicted == labels).sum().item()
+
+        print(f"Accuracy on test set: {(100 * correct / total):.2f}%")
+
+print("Finished Training")
+
+# 绘制准确度和损失曲线
+plt.figure(figsize=(12, 4))
+plt.subplot(1, 2, 1)
+plt.plot(train_accs)
+plt.title('Training Accuracy')
+plt.xlabel('Epoch')
+plt.ylabel('Accuracy (%)')
+
+plt.subplot(1, 2, 2)
+plt.plot(train_losses)
+plt.title('Training Loss')
+plt.xlabel('Epoch')
+plt.ylabel('Loss')
+
+plt.tight_layout()
+plt.show()
+
+print("Finished drawing")

+ 57 - 0
val.py

@@ -0,0 +1,57 @@
+"""
+测试白盒水印标签
+"""
+
+import torch
+import torchvision
+from torch import nn
+import torchvision.transforms as transforms
+
+from model.Alexnet import Alexnet
+from watermark_codec import ModelDecoder
+from watermark_codec.tool import secret_func
+
+model_path = './run/train/alex_net.pt'
+key_path = './run/train/key.pt'
+device = 'cuda'
+
+# 测试集转换
+transform_test = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+])
+testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
+testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False)
+
+# 从指定权重文件加载模型,测试水印嵌入
+model = Alexnet(3, 10, 32).to(device)
+model.load_state_dict(torch.load(model_path))
+# 获取模型中待嵌入的卷积层
+conv_list = []
+for module in model.modules():
+    if isinstance(module, nn.Conv2d):
+        conv_list.append(module)
+conv_list = conv_list[0:2]
+# 初始化白盒水印解码器
+decoder = ModelDecoder(layers=conv_list, key_path=key_path, device=device)
+secret_extract = decoder.decode()  # 提取密码标签
+print(f"secret_extract: {secret_extract}")
+if secret_func.verify_secret(secret_extract):
+    print('密码标签验证成功')
+else:
+    print('验证失败')
+
+# 测试模型
+model.eval()
+correct = 0
+total = 0
+with torch.no_grad():
+    for data in testloader:
+        inputs, labels = data
+        inputs, labels = inputs.to(device), labels.to(device)
+        outputs = model(inputs)
+        _, predicted = torch.max(outputs.data, 1)
+        total += labels.size(0)
+        correct += (predicted == labels).sum().item()
+
+print(f"Accuracy on test set: {(100 * correct / total):.2f}%")