yolox.py

import cv2
import numpy as np
import onnxruntime

from watermark_verify.tools import parse_qrcode_label_file


def preproc(img, input_size, swap=(2, 0, 1)):
    # Letterbox the image into input_size, padding with the value 114 (YOLOX convention).
    if len(img.shape) == 3:
        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
    else:
        padded_img = np.ones(input_size, dtype=np.uint8) * 114

    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img

    # HWC -> CHW, contiguous float32 for ONNX Runtime.
    padded_img = padded_img.transpose(swap)
    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
    return padded_img, r


def demo_postprocess(outputs, img_size, p6=False):
    # Decode raw YOLOX outputs: add grid offsets and scale by the stride of each feature level.
    grids = []
    expanded_strides = []
    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]

    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    return outputs


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # Keep only the boxes whose overlap with the current best box is below the threshold.
        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep
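

# Illustrative check for nms() with hypothetical values (not part of the original module):
# for boxes [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]] and scores [0.9, 0.8, 0.7]
# with nms_thr=0.5, the second box overlaps the first with IoU ~0.70 and is suppressed,
# so the returned indices are [0, 2].

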
def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
    cls_inds = scores.argmax(1)
    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]

    valid_score_mask = cls_scores > score_thr
    if valid_score_mask.sum() == 0:
        return None
    valid_scores = cls_scores[valid_score_mask]
    valid_boxes = boxes[valid_score_mask]
    valid_cls_inds = cls_inds[valid_score_mask]
    keep = nms(valid_boxes, valid_scores, nms_thr)
    if not keep:
        return None
    dets = np.concatenate(
        [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
    )
    return dets


def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        valid_scores = cls_scores[valid_score_mask]
        valid_boxes = boxes[valid_score_mask]
        keep = nms(valid_boxes, valid_scores, nms_thr)
        if len(keep) > 0:
            cls_inds = np.ones((len(keep), 1)) * cls_ind
            dets = np.concatenate(
                [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
            )
            final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
    """Multiclass NMS implemented in Numpy."""
    if class_agnostic:
        nms_method = multiclass_nms_class_agnostic
    else:
        nms_method = multiclass_nms_class_aware
    return nms_method(boxes, scores, nms_thr, score_thr)


def compute_ciou(box1, box2):
    """Compute CIoU between two boxes, assuming the box format is [x1, y1, x2, y2]."""
    x1, y1, x2, y2 = box1
    x1g, y1g, x2g, y2g = box2

    # Intersection area
    xi1, yi1 = max(x1, x1g), max(y1, y1g)
    xi2, yi2 = min(x2, x2g), min(y2, y2g)
    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)

    # Individual areas
    box_area = (x2 - x1) * (y2 - y1)
    boxg_area = (x2g - x1g) * (y2g - y1g)

    # Union area and IoU
    union_area = box_area + boxg_area - inter_area
    iou = inter_area / union_area

    # Distance penalty: squared center distance over squared diagonal of the enclosing box.
    # (The aspect-ratio term of the full CIoU formulation is not included here.)
    cw = max(x2, x2g) - min(x1, x1g)
    ch = max(y2, y2g) - min(y1, y1g)
    c2 = cw ** 2 + ch ** 2
    rho2 = ((x1 + x2 - x1g - x2g) ** 2 + (y1 + y2 - y1g - y2g) ** 2) / 4

    ciou = iou - (rho2 / c2)
    return ciou
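

# Illustrative values for compute_ciou() (hypothetical, not part of the original module):
# compute_ciou([0, 0, 10, 10], [0, 0, 10, 10]) -> 1.0   (identical boxes, no center offset)
# compute_ciou([0, 0, 10, 10], [5, 5, 15, 15]) -> ~0.03 (IoU ~0.14 minus distance penalty ~0.11)

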
def detect_watermark(dets, watermark_boxes, threshold=0.5):
    # A detection counts as a watermark hit if it matches the class of a labelled
    # watermark box and overlaps it with CIoU above the threshold.
    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
        for wm_box in watermark_boxes:
            wm_box_coords = wm_box[:4]
            wm_cls = wm_box[4]
            if cls == wm_cls:
                ciou = compute_ciou(box, wm_box_coords)
                if ciou > threshold:
                    return True
    return False


def predict_and_detect(image_path, model_file, watermark_txt, input_shape) -> bool:
    """
    Run inference with the given ONNX model and perform black-box watermark detection.
    :param image_path: path to the input image
    :param model_file: path to the model file
    :param watermark_txt: path to the watermark label file
    :param input_shape: model input size, tuple
    :return: True if the watermark is detected, False otherwise
    """
    origin_img = cv2.imread(image_path)
    img, ratio = preproc(origin_img, input_shape)
    height, width, channels = origin_img.shape
    watermark_boxes = parse_qrcode_label_file.load_watermark_info(watermark_txt, width, height)

    session = onnxruntime.InferenceSession(model_file)
    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
    output = session.run(None, ort_inputs)

    predictions = demo_postprocess(output[0], input_shape)[0]
    boxes = predictions[:, :4]
    scores = predictions[:, 4:5] * predictions[:, 5:]

    # Convert (cx, cy, w, h) to (x1, y1, x2, y2) and undo the preproc resize ratio.
    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
    boxes_xyxy /= ratio

    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    if dets is not None:
        return detect_watermark(dets, watermark_boxes.get(image_path, []))
    return False
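

# Minimal usage sketch (assumption: the file paths below are hypothetical placeholders, and
# the 640x640 input shape is the common YOLOX default; neither is defined by this module).
if __name__ == "__main__":
    detected = predict_and_detect(
        image_path="images/sample.jpg",               # hypothetical test image
        model_file="models/yolox_watermark.onnx",     # hypothetical ONNX export
        watermark_txt="labels/watermark_qrcode.txt",  # hypothetical QR-code label file
        input_shape=(640, 640),
    )
    print(f"watermark detected: {detected}")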