train_get.py 9.7 KB

import cv2
import tqdm
import wandb
import torch
import numpy as np
import albumentations
from block.val_get import val_get
from block.model_ema import model_ema
from block.lr_get import adam, lr_adjust


def train_get(args, data_dict, model_dict, loss):
    # Load the model
    model = model_dict['model'].to(args.device, non_blocking=args.latch)
    print(model)
    # Learning rate
    optimizer = adam(args.regularization, args.r_value, model.parameters(), lr=args.lr_start, betas=(0.937, 0.999))
    optimizer.load_state_dict(model_dict['optimizer_state_dict']) if model_dict['optimizer_state_dict'] else None
    step_epoch = len(data_dict['train']) // args.batch // args.device_number * args.device_number  # steps per epoch
    print(len(data_dict['train']) // args.batch)
    print(step_epoch)
    optimizer_adjust = lr_adjust(args, step_epoch, model_dict['epoch_finished'])  # learning rate adjustment function
    optimizer = optimizer_adjust(optimizer)  # initialize the learning rate
    # Smooth the parameters with an exponential moving average (EMA); ema must not be stored in args, otherwise saving the model breaks
    ema = model_ema(model) if args.ema else None
    if args.ema:
        ema.updates = model_dict['ema_updates']
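    # (model_ema presumably maintains a shadow copy of the weights, updated each step roughly as
    #  ema_w = decay * ema_w + (1 - decay) * w; see block/model_ema.py for the actual decay schedule)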
    # Datasets
    train_dataset = torch_dataset(args, 'train', data_dict['train'], data_dict['class'])
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
    train_shuffle = False if args.distributed else True  # with a distributed sampler, shuffle must be False
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch, shuffle=train_shuffle,
                                                   drop_last=True, pin_memory=args.latch, num_workers=args.num_worker,
                                                   sampler=train_sampler)
    val_dataset = torch_dataset(args, 'test', data_dict['test'], data_dict['class'])
    val_sampler = None  # in distributed mode, validation is gathered on the main GPU
    val_batch = args.batch // args.device_number  # for distributed validation, shrink the batch to one GPU's share
    val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=val_batch, shuffle=False,
                                                 drop_last=False, pin_memory=args.latch, num_workers=args.num_worker,
                                                 sampler=val_sampler)
    # Distributed initialization
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                      output_device=args.local_rank) if args.distributed else model
    # wandb
    if args.wandb and args.local_rank == 0:
        wandb_image_list = []  # collect the wandb images and log them together at the end (at most args.wandb_image_num of them)
    epoch_base = model_dict['epoch_finished'] + 1  # the new epoch starts at the finished count + 1
    for epoch in range(epoch_base, args.epoch + 1):  # training
        print(f'\n----------------------- epoch {epoch} -----------------------') if args.local_rank == 0 else None
        model.train()
        train_loss = 0  # accumulate the loss
        if args.local_rank == 0:  # tqdm
            tqdm_show = tqdm.tqdm(total=step_epoch)
        for index, (image_batch, true_batch) in enumerate(train_dataloader):
            if args.wandb and args.local_rank == 0 and len(wandb_image_list) < args.wandb_image_num:
                wandb_image_batch = (image_batch * 255).cpu().numpy().astype(np.uint8).transpose(0, 2, 3, 1)
            image_batch = image_batch.to(args.device, non_blocking=args.latch)
            true_batch = true_batch.to(args.device, non_blocking=args.latch)
            if args.amp:  # args.amp holds a torch.cuda.amp.GradScaler when mixed precision is enabled
                with torch.cuda.amp.autocast():
                    pred_batch = model(image_batch)
                    loss_batch = loss(pred_batch, true_batch)
                args.amp.scale(loss_batch).backward()
                args.amp.step(optimizer)
                args.amp.update()
                optimizer.zero_grad()
            else:
                pred_batch = model(image_batch)
                loss_batch = loss(pred_batch, true_batch)
                loss_batch.backward()
                optimizer.step()
                optimizer.zero_grad()
            # Update the EMA parameters; ema.updates is incremented automatically
            ema.update(model) if args.ema else None
            # Record the loss
            train_loss += loss_batch.item()
            # Adjust the learning rate
            optimizer = optimizer_adjust(optimizer)
            # tqdm
            if args.local_rank == 0:
                tqdm_show.set_postfix({'train_loss': loss_batch.item(),
                                       'lr': optimizer.param_groups[0]['lr']})  # update the displayed stats
                tqdm_show.update(args.device_number)  # advance the progress bar
            # wandb
            if args.wandb and args.local_rank == 0 and epoch == 0 and len(wandb_image_list) < args.wandb_image_num:
                cls = true_batch.cpu().numpy().tolist()
                for i in range(len(wandb_image_batch)):  # iterate over every image in the batch
                    image = wandb_image_batch[i]
                    text = ['{:.0f}'.format(_) for _ in cls[i]]
                    text = text[0] if len(text) == 1 else '--'.join(text)
                    image = np.ascontiguousarray(image)  # make the array memory-contiguous (required for cv2 drawing)
                    cv2.putText(image, text, (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    wandb_image = wandb.Image(image)
                    wandb_image_list.append(wandb_image)
                    if len(wandb_image_list) == args.wandb_image_num:
                        break
        # tqdm
        if args.local_rank == 0:
            tqdm_show.close()
        # Compute the average loss
        train_loss /= index + 1
        if args.local_rank == 0:
            print(f'\n| train | train_loss:{train_loss:.4f} | lr:{optimizer.param_groups[0]["lr"]:.6f} |\n')
        # Free GPU memory
        del image_batch, true_batch, pred_batch, loss_batch
        torch.cuda.empty_cache()
        # Validation
        if args.local_rank == 0:  # in distributed mode, only rank 0 validates
            val_loss, accuracy, precision, recall, m_ap = val_get(args, val_dataloader, model, loss, ema,
                                                                  len(data_dict['test']))
        # Save
        if args.local_rank == 0:  # in distributed mode, only rank 0 saves
            model_dict['model'] = model.module if args.distributed else model
            model_dict['epoch_finished'] = epoch
            model_dict['optimizer_state_dict'] = optimizer.state_dict()
            model_dict['ema_updates'] = ema.updates if args.ema else model_dict['ema_updates']
            model_dict['class'] = data_dict['class']
            model_dict['train_loss'] = train_loss
            model_dict['val_loss'] = val_loss
            model_dict['val_accuracy'] = accuracy
            model_dict['val_precision'] = precision
            model_dict['val_recall'] = recall
            model_dict['val_m_ap'] = m_ap
            torch.save(model_dict, args.save_path_last if not args.prune else 'prune_last.pt')  # save the model from the last finished epoch
            if m_ap > 0.5 and m_ap > model_dict['standard']:
                model_dict['standard'] = m_ap
                save_path = args.save_path if not args.prune else args.prune_save
                torch.save(model_dict, save_path)  # save the best model
                print(f'| saved best model: {save_path} | val_m_ap:{m_ap:.4f} |')
            # wandb
            if args.wandb:
                wandb_log = {}
                if epoch == 0:
                    wandb_log.update({'image/train_image': wandb_image_list})
                wandb_log.update({'metric/train_loss': train_loss,
                                  'metric/val_loss': val_loss,
                                  'metric/val_m_ap': m_ap,
                                  'metric/val_accuracy': accuracy,
                                  'metric/val_precision': precision,
                                  'metric/val_recall': recall})
                args.wandb_run.log(wandb_log)
        torch.distributed.barrier() if args.distributed else None  # in distributed mode, synchronize all GPUs after each epoch; faster GPUs wait here


class torch_dataset(torch.utils.data.Dataset):
    def __init__(self, args, tag, data, class_name):
        self.tag = tag
        self.data = data
        self.class_name = class_name
        self.noise_probability = args.noise
        self.noise = albumentations.Compose([  # optional augmentation: blur and Gaussian noise
            albumentations.GaussianBlur(blur_limit=(5, 5), p=0.2),
            albumentations.GaussNoise(var_limit=(10.0, 30.0), p=0.2)])
        self.transform = albumentations.Compose([  # scale the longest side to input_size, then pad to a square
            albumentations.LongestMaxSize(args.input_size),
            albumentations.PadIfNeeded(min_height=args.input_size, min_width=args.input_size,
                                       border_mode=cv2.BORDER_CONSTANT, value=(128, 128, 128))])
        self.rgb_mean = (0.406, 0.456, 0.485)
        self.rgb_std = (0.225, 0.224, 0.229)
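        # (the two tuples above appear to be the standard ImageNet channel statistics;
        #  _image_deal below only scales pixels to [0, 1] and does not apply them)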

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # print(self.data[index][0])
        image = cv2.imread(self.data[index][0])  # read the image
        if self.tag == 'train' and torch.rand(1) < self.noise_probability:  # apply noise augmentation
            image = self.noise(image=image)['image']
        image = self.transform(image=image)['image']  # resize and pad the image
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert to RGB channel order
        image = self._image_deal(image)  # normalize, convert to tensor, reorder dimensions
        label = torch.tensor(self.data[index][1], dtype=torch.float32)  # convert to tensor
        return image, label

    def _image_deal(self, image):  # normalize, convert to tensor, reorder dimensions
        image = torch.tensor(image / 255, dtype=torch.float32).permute(2, 0, 1)
        return image
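

if __name__ == '__main__':
    # Illustrative smoke test: a minimal sketch that assumes only the two args fields __init__ reads
    # (noise and input_size, with hypothetical values). It checks that the letterbox pipeline plus
    # _image_deal produces a (3, input_size, input_size) float tensor from a synthetic BGR image.
    from types import SimpleNamespace
    _args = SimpleNamespace(noise=0.2, input_size=320)  # hypothetical values
    _dataset = torch_dataset(_args, 'test', data=[], class_name=[])
    _image = np.full((480, 640, 3), 128, dtype=np.uint8)  # synthetic gray BGR image
    _image = _dataset.transform(image=_image)['image']  # letterbox 480x640 -> 320x320
    _tensor = _dataset._image_deal(_image)
    print(_tensor.shape)  # expected: torch.Size([3, 320, 320])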