  1. """
  2. yolox基于pytorch框架的黑盒水印处理验证流程
  3. """
  4. import os
  5. import cv2
  6. import numpy as np
  7. import onnxruntime
  8. from watermark_verify.process.general_process_define import BlackBoxWatermarkProcessDefine
  9. from watermark_verify.tools import parse_qrcode_label_file
  10. from watermark_verify.tools.evaluate_tool import calculate_ciou
  11. class DetectionProcess(BlackBoxWatermarkProcessDefine):
  12. def __init__(self, model_filename):
  13. super(DetectionProcess, self).__init__(model_filename)
  14. def process(self) -> bool:
  15. """
  16. 根据流程定义进行处理,并返回模型标签验证结果
  17. :return: 模型标签验证结果
  18. """
  19. # 获取权重文件,使用触发集进行模型推理, 将推理结果与触发集预先二维码保存位置进行比对,在误差范围内则进行下一步,否则返回False
  20. cls_image_mapping = parse_qrcode_label_file.parse_labels(self.qrcode_positions_file)
  21. accessed_cls = set()
  22. for cls, images in cls_image_mapping.items():
  23. for image in images:
  24. image_path = os.path.join(self.trigger_dir, image)
  25. detect_result = self.detect_secret_label(image_path, self.model_filename, self.qrcode_positions_file, (640, 640))
  26. if detect_result:
  27. accessed_cls.add(cls)
  28. break
  29. if not accessed_cls == set(cls_image_mapping.keys()): # 所有的分类都检测出模型水印,模型水印检测结果为True
  30. return False
  31. verify_result = self.verify_label() # 模型标签检测通过,进行标签验证
  32. return verify_result

    def preprocess_image(self, image_path, input_size, swap=(2, 0, 1)):
        """
        Preprocess an input image (letterbox resize, pad, transpose).
        :param image_path: path to the image
        :param input_size: model input size (height, width)
        :param swap: dimension permutation, defaults to (2, 0, 1), i.e. HWC -> CHW
        :return: preprocessed ndarray, resize ratio, and the original height, width and channel count
        """
        img = cv2.imread(image_path)
        if len(img.shape) == 3:
            padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
        else:
            padded_img = np.ones(input_size, dtype=np.uint8) * 114

        r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
        resized_img = cv2.resize(
            img,
            (int(img.shape[1] * r), int(img.shape[0] * r)),
            interpolation=cv2.INTER_LINEAR,
        ).astype(np.uint8)
        padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
        padded_img = padded_img.transpose(swap).copy()
        padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
        height, width, channels = img.shape
        return padded_img, r, height, width, channels
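
    # Worked example of the letterbox math (illustrative numbers, not tied to any trigger image):
    # for a 1280x720 (WxH) source image and input_size (640, 640),
    # r = min(640 / 720, 640 / 1280) = 0.5, so the image is resized to 640x360 and pasted into the
    # top-left corner of a 640x640 canvas filled with the value 114. Boxes predicted on that canvas
    # are later divided by r in postprocess() to map them back to original-image pixel coordinates.
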
    def detect_secret_label(self, image_path, model_file, watermark_txt, input_shape) -> bool:
        """
        Check the model against a trigger-set image to decide whether a black-box watermark is present.
        If the watermark-embedded samples are recognized at a rate above the threshold, the model is
        considered to carry the black-box watermark.
        :param image_path: path to the input image
        :param model_file: path to the model file
        :param watermark_txt: path to the watermark label file
        :param input_shape: model input size, tuple
        :return: detection result
        """
        img, ratio, height, width, channels = self.preprocess_image(image_path, input_shape)
        x_center, y_center, w, h, cls = parse_qrcode_label_file.load_watermark_info(watermark_txt, image_path)
        # Convert the normalized YOLO-style label (center x/y, width, height) to absolute pixel coordinates.
        x1 = (x_center - w / 2) * width
        y1 = (y_center - h / 2) * height
        x2 = (x_center + w / 2) * width
        y2 = (y_center + h / 2) * height
        watermark_box = [x1, y1, x2, y2, cls]
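        # Worked example with illustrative values: for a 640x480 (WxH) image labeled with
        # x_center=0.5, y_center=0.5, w=0.25, h=0.25, the absolute box is
        # x1 = (0.5 - 0.125) * 640 = 240, y1 = (0.5 - 0.125) * 480 = 180,
        # x2 = (0.5 + 0.125) * 640 = 400, y2 = (0.5 + 0.125) * 480 = 300.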
        if len(watermark_box) == 0:
            return False

        # Run ONNX Runtime inference on the preprocessed image (img[None] adds the batch dimension),
        # then decode and NMS-filter the raw output before matching it against the watermark box.
        session = onnxruntime.InferenceSession(model_file)
        ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
        output = session.run(None, ort_inputs)
        dets = postprocess(output[0], input_shape, ratio)
        if dets is not None:
            detect_result = detect_watermark(dets, watermark_box)
            return detect_result
        else:
            return False


def postprocess(outputs, img_size, ratio, p6=False):
    """Decode raw YOLOX head outputs into boxes in original-image coordinates and run multiclass NMS."""
    grids = []
    expanded_strides = []
    outputs = outputs[0]

    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]
    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    # YOLOX decoding: box centers are grid-cell offsets scaled by the stride,
    # widths/heights are exponentiated and scaled by the stride.
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    boxes = outputs[:, :4]
    scores = outputs[:, 4:5] * outputs[:, 5:]

    # Convert from (cx, cy, w, h) to (x1, y1, x2, y2) and undo the letterbox resize ratio.
    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
    boxes_xyxy /= ratio
    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    return dets
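
# Worked check of the decoding grid built in postprocess(), assuming the default 640x640 input
# used by process(): strides [8, 16, 32] give feature maps of 80x80, 40x40 and 20x20, i.e.
# 6400 + 1600 + 400 = 8400 anchor points, which must match the second dimension of the ONNX
# model's output tensor (1, 8400, 5 + num_classes).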


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep
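
# Illustrative nms() example with made-up boxes: for boxes [[0, 0, 10, 10], [1, 1, 11, 11]] and
# scores [0.9, 0.8], each box has area 121 and the intersection area is 100, so
# IoU = 100 / (121 + 121 - 100) ≈ 0.70 > 0.45 and only the higher-scoring box is kept.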


def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
    cls_inds = scores.argmax(1)
    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]

    valid_score_mask = cls_scores > score_thr
    if valid_score_mask.sum() == 0:
        return None
    valid_scores = cls_scores[valid_score_mask]
    valid_boxes = boxes[valid_score_mask]
    valid_cls_inds = cls_inds[valid_score_mask]
    keep = nms(valid_boxes, valid_scores, nms_thr)
    if keep:
        dets = np.concatenate(
            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
        )
        return dets


def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        else:
            valid_scores = cls_scores[valid_score_mask]
            valid_boxes = boxes[valid_score_mask]
            keep = nms(valid_boxes, valid_scores, nms_thr)
            if len(keep) > 0:
                cls_inds = np.ones((len(keep), 1)) * cls_ind
                dets = np.concatenate(
                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
                )
                final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
    """Multiclass NMS implemented in Numpy."""
    if class_agnostic:
        nms_method = multiclass_nms_class_agnostic
    else:
        nms_method = multiclass_nms_class_aware
    return nms_method(boxes, scores, nms_thr, score_thr)
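
# Note: postprocess() calls multiclass_nms() with the default class_agnostic=True, so overlapping
# boxes are suppressed across classes; pass class_agnostic=False to run NMS independently per class.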


def detect_watermark(dets, watermark_box, threshold=0.5):
    """Return True if any detection of the watermark class overlaps the expected QR-code box with CIoU above the threshold."""
    if dets.size == 0:  # no detections left after NMS
        return False
    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
        wm_box_coords = watermark_box[:4]
        wm_cls = watermark_box[4]
        if cls == wm_cls:
            ciou = calculate_ciou(box, wm_box_coords)
            if ciou > threshold:
                return True
    return False
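

# Minimal usage sketch (illustrative only, not part of the verification pipeline). The model path
# below is a hypothetical placeholder; the trigger-set directory and QR-code position file are
# assumed to be resolved by BlackBoxWatermarkProcessDefine from the model path, since the methods
# above rely on self.trigger_dir and self.qrcode_positions_file provided by that base class.
if __name__ == "__main__":
    example_model = "models/yolox_watermarked.onnx"  # hypothetical path for illustration
    processor = DetectionProcess(example_model)
    print("black-box watermark verification passed:", processor.process())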