yolox_pytorch_blackbox_process.py

  1. """
  2. yolox基于pytorch框架的黑盒水印处理验证流程
  3. """
  4. import os
  5. import cv2
  6. import numpy as np
  7. import onnxruntime
  8. from watermark_verify.process.general_process_define import BlackBoxWatermarkProcessDefine
  9. from watermark_verify.tools import parse_qrcode_label_file
  10. from watermark_verify.tools.evaluate_tool import calculate_ciou
  11. class ClassificationProcess(BlackBoxWatermarkProcessDefine):
  12. def __init__(self, model_filename):
  13. super(ClassificationProcess, self).__init__(model_filename)

    def process(self) -> bool:
        """
        Run the processing flow defined for this class and return the model label verification result.
        :return: model label verification result
        """
        # Load the trigger set and run inference with the model weights. Each detection is
        # compared against the QR code position recorded for that trigger image; only if every
        # class is matched within tolerance does the flow continue, otherwise it returns False.
        cls_image_mapping = parse_qrcode_label_file.parse_labels(self.qrcode_positions_file)
        accessed_cls = set()
        for cls, images in cls_image_mapping.items():
            for image in images:
                image_path = os.path.join(self.trigger_dir, image)
                detect_result = self.detect_secret_label(
                    image_path, self.model_filename, self.qrcode_positions_file, (640, 640)
                )
                if detect_result:
                    accessed_cls.add(cls)
                    break
        # The model watermark counts as detected only if every class was matched.
        if accessed_cls != set(cls_image_mapping.keys()):
            return False
        # Watermark detection passed; verify the model label.
        verify_result = self.verify_label()
        return verify_result

    def preprocess_image(self, image_path, input_size, swap=(2, 0, 1)):
        """
        Preprocess the input image: letterbox-resize it into input_size, pad with the value 114,
        and permute the axes (HWC -> CHW by default).
        :param swap: axis permutation tuple, defaults to (2, 0, 1)
        :param image_path: image path
        :param input_size: model input size
        :return: preprocessed ndarray, resize ratio, and the original height, width and channels
        """
        img = cv2.imread(image_path)
        if len(img.shape) == 3:
            padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
        else:
            padded_img = np.ones(input_size, dtype=np.uint8) * 114

        # Scale the image so it fits inside input_size while keeping its aspect ratio.
        r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
        resized_img = cv2.resize(
            img,
            (int(img.shape[1] * r), int(img.shape[0] * r)),
            interpolation=cv2.INTER_LINEAR,
        ).astype(np.uint8)
        padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img

        padded_img = padded_img.transpose(swap).copy()
        padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
        height, width, channels = img.shape
        return padded_img, r, height, width, channels

    def detect_secret_label(self, image_path, model_file, watermark_txt, input_shape) -> bool:
        """
        Check the model with a trigger-set image to decide whether a black-box watermark is
        present: if a detection on the watermark-embedded sample matches the recorded QR code
        box with overlap above the threshold, the watermark is considered detected.
        :param image_path: input image path
        :param model_file: model file path
        :param watermark_txt: watermark label file path
        :param input_shape: model input size, tuple
        :return: detection result
        """
        img, ratio, height, width, channels = self.preprocess_image(image_path, input_shape)
        x_center, y_center, w, h, cls = parse_qrcode_label_file.load_watermark_info(watermark_txt, image_path)
        # Convert the normalized YOLO-style box to absolute corner coordinates.
        x1 = (x_center - w / 2) * width
        y1 = (y_center - h / 2) * height
        x2 = (x_center + w / 2) * width
        y2 = (y_center + h / 2) * height
        watermark_box = [x1, y1, x2, y2, cls]
        if len(watermark_box) == 0:
            return False

        # Run ONNX Runtime inference and decode the raw YOLOX output.
        session = onnxruntime.InferenceSession(model_file)
        ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
        output = session.run(None, ort_inputs)
        predictions = postprocess(output[0], input_shape)[0]

        boxes = predictions[:, :4]
        scores = predictions[:, 4:5] * predictions[:, 5:]
        # Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) and undo the letterbox resize ratio.
        boxes_xyxy = np.ones_like(boxes)
        boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
        boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
        boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
        boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
        boxes_xyxy /= ratio
        dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
        if dets is not None:
            detect_result = detect_watermark(dets, watermark_box)
            return detect_result
        else:
            return False
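

# Illustrative sketch, not part of the original verification flow: the coordinate conversion
# performed in detect_secret_label shown on concrete numbers. A normalized YOLO-style box
# (x_center, y_center, w, h) is mapped to absolute corner coordinates using the original
# image width and height; all values below are made-up example inputs.
def _example_yolo_to_xyxy():
    width, height = 1280, 720                        # assumed original image size
    x_center, y_center, w, h = 0.5, 0.5, 0.25, 0.25  # normalized QR-code box
    x1 = (x_center - w / 2) * width                  # 0.375 * 1280 = 480.0
    y1 = (y_center - h / 2) * height                 # 0.375 * 720  = 270.0
    x2 = (x_center + w / 2) * width                  # 0.625 * 1280 = 800.0
    y2 = (y_center + h / 2) * height                 # 0.625 * 720  = 450.0
    return [x1, y1, x2, y2]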


def postprocess(outputs, img_size, p6=False):
    """Decode raw YOLOX outputs: map grid-relative predictions back to input-image pixels."""
    grids = []
    expanded_strides = []
    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]

    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    # Centers are offset by their grid cell and scaled by the stride; sizes are exp-scaled.
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    return outputs
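

# Illustrative sketch: for a 640x640 input, the three strides (8, 16, 32) yield
# 80*80 + 40*40 + 20*20 = 8400 anchor positions, so a raw YOLOX output has shape
# (batch, 8400, 5 + num_classes). The dummy tensor below only checks that postprocess
# accepts and preserves that shape; the class count is an assumption for the example.
def _example_postprocess_shape():
    num_classes = 1  # assumed number of trigger-set classes, for illustration only
    dummy = np.zeros((1, 8400, 5 + num_classes), dtype=np.float32)
    decoded = postprocess(dummy, (640, 640))
    return decoded.shape  # (1, 8400, 5 + num_classes)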


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep
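

# Illustrative sketch: single-class NMS on two heavily overlapping boxes plus one separate
# box, with made-up coordinates and scores. With nms_thr=0.5 the lower-scoring duplicate is
# suppressed and the indices of the two surviving boxes are returned.
def _example_nms_usage():
    boxes = np.array([
        [0, 0, 100, 100],      # score 0.9, kept
        [5, 5, 105, 105],      # score 0.8, IoU with the first box > 0.5, suppressed
        [200, 200, 300, 300],  # score 0.7, no overlap, kept
    ], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    return nms(boxes, scores, nms_thr=0.5)  # [0, 2]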


def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
    cls_inds = scores.argmax(1)
    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]

    valid_score_mask = cls_scores > score_thr
    if valid_score_mask.sum() == 0:
        return None
    valid_scores = cls_scores[valid_score_mask]
    valid_boxes = boxes[valid_score_mask]
    valid_cls_inds = cls_inds[valid_score_mask]
    keep = nms(valid_boxes, valid_scores, nms_thr)
    # Return None instead of referencing an unbound variable when NMS keeps nothing.
    dets = None
    if keep:
        dets = np.concatenate(
            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
        )
    return dets


def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        else:
            valid_scores = cls_scores[valid_score_mask]
            valid_boxes = boxes[valid_score_mask]
            keep = nms(valid_boxes, valid_scores, nms_thr)
            if len(keep) > 0:
                cls_inds = np.ones((len(keep), 1)) * cls_ind
                dets = np.concatenate(
                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
                )
                final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
    """Multiclass NMS implemented in Numpy."""
    if class_agnostic:
        nms_method = multiclass_nms_class_agnostic
    else:
        nms_method = multiclass_nms_class_aware
    return nms_method(boxes, scores, nms_thr, score_thr)
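

# Illustrative sketch with made-up boxes and scores: multiclass_nms returns an (N, 6) array
# laid out as [x1, y1, x2, y2, score, cls_ind], which is the format detect_watermark expects.
# The class-agnostic default suppresses overlapping boxes regardless of class, while
# class_agnostic=False runs NMS separately per class.
def _example_multiclass_nms_usage():
    boxes = np.array([
        [0, 0, 100, 100],
        [2, 2, 102, 102],
    ], dtype=np.float32)
    scores = np.array([
        [0.9, 0.1],  # best class 0
        [0.2, 0.8],  # best class 1
    ], dtype=np.float32)
    agnostic = multiclass_nms(boxes, scores, nms_thr=0.5, score_thr=0.3)  # 1 row survives
    per_class = multiclass_nms(boxes, scores, nms_thr=0.5, score_thr=0.3,
                               class_agnostic=False)  # 2 rows, one per class
    return agnostic, per_class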


def detect_watermark(dets, watermark_box, threshold=0.5):
    """Return True if any detection of the watermark class overlaps the recorded box with CIoU above threshold."""
    if dets.size == 0:  # no detections survived NMS
        return False
    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
        wm_box_coords = watermark_box[:4]
        wm_cls = watermark_box[4]
        if cls == wm_cls:
            ciou = calculate_ciou(box, wm_box_coords)
            if ciou > threshold:
                return True
    return False
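

# Minimal usage sketch, under assumptions: the model path below is a placeholder, and the
# BlackBoxWatermarkProcessDefine base class is assumed to derive trigger_dir and
# qrcode_positions_file from the model filename, as the attributes used in process() suggest.
# Verification succeeds only if every trigger class is detected and the label check passes.
if __name__ == "__main__":
    model_path = "yolox.onnx"  # hypothetical path
    passed = ClassificationProcess(model_path).process()
    print(f"black-box watermark verification passed: {passed}")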