- """
- Created on 2024/5/8
- @author: <NAME>
- @version: 1.0
- @file: model_encoder.py
- @brief 白盒水印编码器
- """
from typing import List

import torch
from torch import nn

from watermark_codec.tool.str_convertor import string2bin
from watermark_codec.tool.tensor_deal import flatten_parameters, save_tensor, get_prob, loss_fun
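
# NOTE: the helpers imported above are not shown in this file. Judging from how
# they are used below (an assumption, not a confirmed spec), a consistent reading is:
#   string2bin(s)          -> list of 0/1 ints encoding the string s
#   flatten_parameters(ws) -> 1-D tensor flattening the given weight tensors
#   get_prob(X, w)         -> sigmoid(X @ w), per-bit extraction probabilities
#   loss_fun(prob, secret) -> binary cross-entropy between prob and the secret bits
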
class ModelEncoder:
    def __init__(self, layers: List[nn.Conv2d], secret: str, key_path: str = None, device='cuda'):
        self.device = device
        self.layers = layers

        # Validate the target layers: every layer selected for embedding must be a Conv2d
        for layer in layers:
            if not isinstance(layer, nn.Conv2d):
                raise TypeError('A target layer is not a convolutional layer')

        weights = [x.weight for x in layers]
        w = flatten_parameters(weights)
        w_init = w.clone().detach()
        print('Size of embedding parameters:', w.shape)

        # Convert the secret string into a bit tensor (the code to embed)
        self.secret = torch.tensor(string2bin(secret), dtype=torch.float).to(self.device)
        self.secret_len = self.secret.shape[0]
        print(f'Secret: {self.secret} secret length: {self.secret_len}')

        # Generate the random projection matrix that serves as the watermark key
        self.X_random = torch.randn((self.secret_len, w_init.shape[0])).to(self.device)
        if key_path is not None:
            save_tensor(self.X_random, key_path)  # persist the projection matrix to the given path
    def get_loss(self, loss, alpha=1):
        """
        Augment the host model's loss and return the newly computed total.
        :param loss: original training loss of the host model
        :param alpha: weight of the white-box watermark loss, defaults to 1
        :return: total loss with the white-box watermark penalty added
        """
        penalty = self.get_embeder_loss()  # loss of embedding the white-box watermark
        # Out-of-place add, so the caller's loss tensor is not mutated in place
        loss = loss + alpha * penalty
        return loss
    def get_embeder_loss(self):
        """
        Compute the watermark embedding loss.
        :return: loss value of the watermark embedding
        """
        weights = [x.weight for x in self.layers]
        w = flatten_parameters(weights)        # current flattened weights of the target layers
        prob = get_prob(self.X_random, w)      # project the weights to per-bit probabilities
        penalty = loss_fun(prob, self.secret)  # distance between extracted bits and the secret
        return penalty
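

# A minimal usage sketch (not part of the original file). The model, dummy data,
# optimizer and the 'key.pt' path are placeholders chosen for illustration.
if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # A tiny stand-in CNN; any model containing Conv2d layers works the same way
    model = nn.Sequential(
        nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
        nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(32, 10),
    ).to(device)

    # Embed the watermark into all convolutional layers; the key is saved to key.pt
    conv_layers = [m for m in model.modules() if isinstance(m, nn.Conv2d)]
    encoder = ModelEncoder(conv_layers, secret='example', key_path='key.pt', device=device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    # One training step on a dummy batch: the watermark penalty rides on the task loss
    x = torch.randn(8, 3, 32, 32, device=device)
    y = torch.randint(0, 10, (8,), device=device)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss = encoder.get_loss(loss, alpha=1)  # add the white-box watermark penalty
    loss.backward()
    optimizer.step()
    print('total loss:', loss.item(), 'embedding loss:', encoder.get_embeder_loss().item())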