model_encoder.py 2.1 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061
  1. """
  2. Created on 2024/5/8
  3. @author: <NAME>
  4. @version: 1.0
  5. @file: model_encoder.py
  6. @brief 白盒水印编码器
  7. """
  8. from typing import List
  9. import torch
  10. from torch import nn
  11. from watermark_codec.tool.str_convertor import string2bin
  12. from watermark_codec.tool.tensor_deal import flatten_parameters, save_tensor, get_prob, loss_fun
  13. class ModelEncoder:
  14. def __init__(self, layers: List[nn.Conv2d], secret: str, key_path: str = None, device='cuda'):
  15. self.device = device
  16. self.layers = layers
  17. # 处理待嵌入的卷积层
  18. for layer in layers: # 判断传入的目标层是否全部为卷积层
  19. if not isinstance(layer, nn.Conv2d):
  20. raise TypeError('传入参数不是卷积层')
  21. weights = [x.weight for x in layers]
  22. w = flatten_parameters(weights)
  23. w_init = w.clone().detach()
  24. print('Size of embedding parameters:', w.shape)
  25. # 对密钥进行处理
  26. self.secret = torch.tensor(string2bin(secret), dtype=torch.float).to(self.device) # the embedding code
  27. self.secret_len = self.secret.shape[0]
  28. print(f'Secret:{self.secret} secret length:{self.secret_len}')
  29. # 生成随机的投影矩阵
  30. self.X_random = torch.randn((self.secret_len, w_init.shape[0])).to(self.device)
  31. save_tensor(self.X_random, key_path) # 保存投影矩阵至指定位置
  32. def get_loss(self, loss, alpha=1):
  33. """
  34. 修改目标模型损失,直接返回新计算的损失
  35. :param loss: 原模型的损失
  36. :param alpha: 白盒水印训练损失权重,默认为1
  37. :return: 添加白盒水印惩罚项后的总损失
  38. """
  39. penalty = self.get_embeder_loss() # 计算嵌入白盒水印的损失
  40. loss += alpha * penalty # 与原模型训练损失相加
  41. return loss
  42. def get_embeder_loss(self):
  43. """
  44. 获取水印嵌入损失
  45. :return: 水印嵌入的损失值
  46. """
  47. weights = [x.weight for x in self.layers]
  48. w = flatten_parameters(weights)
  49. prob = get_prob(self.X_random, w)
  50. penalty = loss_fun(prob, self.secret)
  51. return penalty