utils_fit.py

import os

import numpy as np
import torch
from torch import nn
from tqdm import tqdm

from utils.utils import get_lr


def fit_one_epoch(model, train_util, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, fp16, scaler, save_period, save_dir):
    total_loss = 0
    rpn_loc_loss = 0
    rpn_cls_loss = 0
    roi_loc_loss = 0
    roi_cls_loss = 0
    val_loss = 0

    secret_label = "1727420599.EYev/FbGSh138d6qOtcXBtfZ1YWOO+X/v2VOrIHztcd1AlP96OLECl0WjlESK8UynMA9D6rL/vKQfEs3jLy+/Q=="

    conv_layers = []
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            conv_layers.append(module)
    conv_layers = conv_layers[0:2]

    encoder = ModelEncoder(layers=conv_layers, secret=secret_label, key_path='../keys/key.npy', device='cuda')

    print('Start Train')
    with tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break
            images, boxes, labels = batch[0], batch[1], batch[2]
            with torch.no_grad():
                if cuda:
                    images = images.cuda()

            source_loss, embed_loss = train_util.train_step(encoder, images, boxes, labels, 1, fp16, scaler)
            rpn_loc, rpn_cls, roi_loc, roi_cls, total = source_loss

            total_loss += total.item()
            rpn_loc_loss += rpn_loc.item()
            rpn_cls_loss += rpn_cls.item()
            roi_loc_loss += roi_loc.item()
            roi_cls_loss += roi_cls.item()

            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'rpn_loc': rpn_loc_loss / (iteration + 1),
                                'rpn_cls': rpn_cls_loss / (iteration + 1),
                                'roi_loc': roi_loc_loss / (iteration + 1),
                                'roi_cls': roi_cls_loss / (iteration + 1),
                                'embed_loss': embed_loss,
                                'lr': get_lr(optimizer)})
            pbar.update(1)

    print('Finish Train')
    print('Start Validation')
    with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_step_val:
                break
            images, boxes, labels = batch[0], batch[1], batch[2]
            with torch.no_grad():
                if cuda:
                    images = images.cuda()

                train_util.optimizer.zero_grad()
                _, _, _, _, val_total = train_util.forward(images, boxes, labels, 1)
                val_loss += val_total.item()

            pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1)})
            pbar.update(1)

    print('Finish Validation')
    loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)
    eval_callback.on_epoch_end(epoch + 1)
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
    #-----------------------------------------------#
    #   Save the weights
    #-----------------------------------------------#
    if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
        torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))

    if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):
        print('Save best model to best_epoch_weights.pth')
        torch.save(model.state_dict(), os.path.join(save_dir, "best_epoch_weights.pth"))

    torch.save(model.state_dict(), os.path.join(save_dir, "last_epoch_weights.pth"))


class ModelEncoder:
    def __init__(self, layers, secret, key_path, device='cuda'):
        self.device = device
        self.layers = layers

        #   Process the convolutional layers that will carry the embedding
        for layer in layers:  # check that every target layer passed in is a convolutional layer
            if not isinstance(layer, nn.Conv2d):
                raise TypeError('The given layer is not a convolutional layer')
        weights = [x.weight for x in layers]
        w = self.flatten_parameters(weights)
        w_init = w.clone().detach()
        print('Size of embedding parameters:', w.shape)

        #   Process the secret
        self.secret = torch.tensor(self.string2bin(secret), dtype=torch.float).to(self.device)  # the embedding code
        self.secret_len = self.secret.shape[0]
        print(f'Secret:{self.secret} secret length:{self.secret_len}')

        #   Generate a random projection matrix
        self.X_random = torch.randn((self.secret_len, w_init.shape[0])).to(self.device)
        self.save_tensor(self.X_random, key_path)  # save the projection matrix to the specified location
    def get_embeder_loss(self):
        weights = [x.weight for x in self.layers]
        w = self.flatten_parameters(weights)
        prob = self.get_prob(self.X_random, w)
        penalty = self.loss_fun(prob, self.secret)
        return penalty

    def string2bin(self, s):
        binary_representation = ''.join(format(ord(x), '08b') for x in s)
        return [int(x) for x in binary_representation]

    def save_tensor(self, tensor, save_path):
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        tensor = tensor.cpu()
        numpy_array = tensor.numpy()
        np.save(save_path, numpy_array)

    def flatten_parameters(self, weights):
        weights = [weight.permute(2, 3, 1, 0) for weight in weights]
        return torch.cat([torch.mean(x, dim=3).reshape(-1) for x in weights])

    def get_prob(self, x_random, w):
        mm = torch.mm(x_random, w.reshape((w.shape[0], 1)))
        return mm.flatten()

    def loss_fun(self, x, y):
        return nn.BCEWithLogitsLoss()(x, y)
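

# ---------------------------------------------------------------------------- #
#   Minimal usage sketch (not part of the training pipeline above): it builds a
#   toy two-conv model, instantiates ModelEncoder on it, computes the embedding
#   loss that train_util.train_step is expected to add to the detection loss,
#   and checks how many secret bits the random projection currently recovers.
#   The toy model, the 'demo' secret, the './keys/demo_key.npy' path, and the
#   sign-threshold decoding rule (logit > 0 -> bit 1, implied by the use of
#   BCEWithLogitsLoss) are illustrative assumptions, not part of the original
#   training code.
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(8, 16, kernel_size=3, padding=1),
    )
    demo_layers = [m for m in toy_model.modules() if isinstance(m, nn.Conv2d)][0:2]

    demo_encoder = ModelEncoder(layers=demo_layers, secret='demo', key_path='./keys/demo_key.npy', device='cpu')

    # Watermark regularizer; during training this term is minimized alongside the detection loss.
    embed_loss = demo_encoder.get_embeder_loss()
    print('embed_loss before any training:', embed_loss.item())

    # Decoding check: read a bit as 1 when its projected logit is positive.
    with torch.no_grad():
        w = demo_encoder.flatten_parameters([x.weight for x in demo_layers])
        logits = demo_encoder.get_prob(demo_encoder.X_random, w)
        decoded = (logits > 0).float()
        bit_acc = (decoded == demo_encoder.secret).float().mean().item()
    print('bit accuracy of the untrained toy model:', bit_acc)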