"""
Created on 2024/5/8
@author: <NAME>
@version: 1.0
@file: model_encoder.py
@brief: White-box watermark encoder
"""
from typing import List, Optional

import mindspore as ms
from mindspore import nn

from watermark_codec.tool.str_convertor import string2bin
from watermark_codec.tool.tensor_deal import flatten_parameters, save_tensor, get_prob, loss_fun
  13. class ModelEncoder:
  14. def __init__(self, layers: List[nn.Conv2d], secret: str, key_path: str = None):
  15. self.layers = layers
  16. # 处理待嵌入的卷积层
  17. for layer in layers: # 判断传入的目标层是否全部为卷积层
  18. if not isinstance(layer, nn.Conv2d):
  19. raise TypeError('传入参数不是卷积层')
  20. weights = [x.weight for x in layers]
  21. w = flatten_parameters(weights)
  22. print('Size of embedding parameters:', w.shape)
  23. # 对密钥进行处理
  24. self.secret = ms.tensor(string2bin(secret), dtype=ms.float32) # the embedding code
  25. self.secret_len = self.secret.shape[0]
  26. print(f'Secret:{self.secret} secret length:{self.secret_len}')
  27. # 生成随机的投影矩阵
  28. self.X_random = ms.ops.randn((self.secret_len, w.shape[0]))
  29. save_tensor(self.X_random, key_path) # 保存投影矩阵至指定位置
  30. def get_loss(self, loss, alpha=1):
  31. """
  32. 修改目标模型损失,直接返回新计算的损失
  33. :param loss: 原模型的损失
  34. :param alpha: 白盒水印训练损失权重,默认为1
  35. :return: 添加白盒水印惩罚项后的总损失
  36. """
  37. penalty = self.get_embeder_loss() # 计算嵌入白盒水印的损失
  38. loss += alpha * penalty # 与原模型训练损失相加
  39. return loss
  40. def get_embeder_loss(self):
  41. """
  42. 获取水印嵌入损失
  43. :return: 水印嵌入的损失值
  44. """
  45. weights = [x.weight for x in self.layers]
  46. w = flatten_parameters(weights)
  47. prob = get_prob(self.X_random, w)
  48. penalty = loss_fun(prob, self.secret)
  49. return penalty