#!/usr/bin/env python3
# Copyright (c) Megvii, Inc. and its affiliates.

import os

import cv2
import numpy as np
import onnxruntime

COCO_CLASSES = (
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
    "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard",
    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
    "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
    "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
    "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
    "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    "hair drier", "toothbrush",
)

_COLORS = np.array(
    [
        0.000, 0.447, 0.741,
        0.850, 0.325, 0.098,
        0.929, 0.694, 0.125,
        0.494, 0.184, 0.556,
        0.466, 0.674, 0.188,
        0.301, 0.745, 0.933,
        0.635, 0.078, 0.184,
        0.300, 0.300, 0.300,
        0.600, 0.600, 0.600,
        1.000, 0.000, 0.000,
        1.000, 0.500, 0.000,
        0.749, 0.749, 0.000,
        0.000, 1.000, 0.000,
        0.000, 0.000, 1.000,
        0.667, 0.000, 1.000,
        0.333, 0.333, 0.000,
        0.333, 0.667, 0.000,
        0.333, 1.000, 0.000,
        0.667, 0.333, 0.000,
        0.667, 0.667, 0.000,
        0.667, 1.000, 0.000,
        1.000, 0.333, 0.000,
        1.000, 0.667, 0.000,
        1.000, 1.000, 0.000,
        0.000, 0.333, 0.500,
        0.000, 0.667, 0.500,
        0.000, 1.000, 0.500,
        0.333, 0.000, 0.500,
        0.333, 0.333, 0.500,
        0.333, 0.667, 0.500,
        0.333, 1.000, 0.500,
        0.667, 0.000, 0.500,
        0.667, 0.333, 0.500,
        0.667, 0.667, 0.500,
        0.667, 1.000, 0.500,
        1.000, 0.000, 0.500,
        1.000, 0.333, 0.500,
        1.000, 0.667, 0.500,
        1.000, 1.000, 0.500,
        0.000, 0.333, 1.000,
        0.000, 0.667, 1.000,
        0.000, 1.000, 1.000,
        0.333, 0.000, 1.000,
        0.333, 0.333, 1.000,
        0.333, 0.667, 1.000,
        0.333, 1.000, 1.000,
        0.667, 0.000, 1.000,
        0.667, 0.333, 1.000,
        0.667, 0.667, 1.000,
        0.667, 1.000, 1.000,
        1.000, 0.000, 1.000,
        1.000, 0.333, 1.000,
        1.000, 0.667, 1.000,
        0.333, 0.000, 0.000,
        0.500, 0.000, 0.000,
        0.667, 0.000, 0.000,
        0.833, 0.000, 0.000,
        1.000, 0.000, 0.000,
        0.000, 0.167, 0.000,
        0.000, 0.333, 0.000,
        0.000, 0.500, 0.000,
        0.000, 0.667, 0.000,
        0.000, 0.833, 0.000,
        0.000, 1.000, 0.000,
        0.000, 0.000, 0.167,
        0.000, 0.000, 0.333,
        0.000, 0.000, 0.500,
        0.000, 0.000, 0.667,
        0.000, 0.000, 0.833,
        0.000, 0.000, 1.000,
        0.000, 0.000, 0.000,
        0.143, 0.143, 0.143,
        0.286, 0.286, 0.286,
        0.429, 0.429, 0.429,
        0.571, 0.571, 0.571,
        0.714, 0.714, 0.714,
        0.857, 0.857, 0.857,
        0.000, 0.447, 0.741,
        0.314, 0.717, 0.741,
        0.50, 0.5, 0,
    ]
).astype(np.float32).reshape(-1, 3)


def preproc(img, input_size, swap=(2, 0, 1)):
    if len(img.shape) == 3:
        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
    else:
        padded_img = np.ones(input_size, dtype=np.uint8) * 114

    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img

    padded_img = padded_img.transpose(swap)
    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
    return padded_img, r


def nms(boxes, scores, nms_thr):
    """Single class NMS implemented in Numpy."""
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= nms_thr)[0]
        order = order[inds + 1]

    return keep


def demo_postprocess(outputs, img_size, p6=False):
    grids = []
    expanded_strides = []
    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]

    hsizes = [img_size[0] // stride for stride in strides]
    wsizes = [img_size[1] // stride for stride in strides]

    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        shape = grid.shape[:2]
        expanded_strides.append(np.full((*shape, 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides

    return outputs


def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
    cls_inds = scores.argmax(1)
    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]

    valid_score_mask = cls_scores > score_thr
    if valid_score_mask.sum() == 0:
        return None
    valid_scores = cls_scores[valid_score_mask]
    valid_boxes = boxes[valid_score_mask]
    valid_cls_inds = cls_inds[valid_score_mask]
    keep = nms(valid_boxes, valid_scores, nms_thr)
    dets = None  # return None when NMS keeps nothing, instead of raising NameError
    if keep:
        dets = np.concatenate(
            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
        )
    return dets
def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
    """Multiclass NMS implemented in Numpy. Class-aware version."""
    final_dets = []
    num_classes = scores.shape[1]
    for cls_ind in range(num_classes):
        cls_scores = scores[:, cls_ind]
        valid_score_mask = cls_scores > score_thr
        if valid_score_mask.sum() == 0:
            continue
        else:
            valid_scores = cls_scores[valid_score_mask]
            valid_boxes = boxes[valid_score_mask]
            keep = nms(valid_boxes, valid_scores, nms_thr)
            if len(keep) > 0:
                cls_inds = np.ones((len(keep), 1)) * cls_ind
                dets = np.concatenate(
                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
                )
                final_dets.append(dets)
    if len(final_dets) == 0:
        return None
    return np.concatenate(final_dets, 0)


def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
    """Multiclass NMS implemented in Numpy."""
    if class_agnostic:
        nms_method = multiclass_nms_class_agnostic
    else:
        nms_method = multiclass_nms_class_aware
    return nms_method(boxes, scores, nms_thr, score_thr)


def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
    for i in range(len(boxes)):
        box = boxes[i]
        cls_id = int(cls_ids[i])
        score = scores[i]
        if score < conf:
            continue
        x0 = int(box[0])
        y0 = int(box[1])
        x1 = int(box[2])
        y1 = int(box[3])

        color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
        text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
        txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
        font = cv2.FONT_HERSHEY_SIMPLEX

        txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)

        txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
        cv2.rectangle(
            img,
            (x0, y0 + 1),
            (x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
            txt_bk_color,
            -1
        )
        cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)

    return img


def load_watermark_info(watermark_txt, img_width, img_height, image_path):
    """
    Load the QR-code (watermark) embedding box and class for the given image
    from the label file.
    :param watermark_txt: label file path
    :param img_width: image width in pixels
    :param img_height: image height in pixels
    :param image_path: path of the image to look up
    :return: [x1, y1, x2, y2, cls], or [] if the image is not listed
    """
    with open(watermark_txt, 'r') as f:
        for line in f.readlines():
            parts = line.strip().split()
            filename = os.path.basename(parts[0])
            if filename == os.path.basename(image_path):
                x_center, y_center, w, h = map(float, parts[1:5])
                cls = int(float(parts[5]))  # cast the class label to an integer
                # Convert normalized YOLO-style coordinates to absolute pixel coordinates.
                x1 = (x_center - w / 2) * img_width
                y1 = (y_center - h / 2) * img_height
                x2 = (x_center + w / 2) * img_width
                y2 = (y_center + h / 2) * img_height
                return [x1, y1, x2, y2, cls]
    return []


def compute_ciou(box1, box2):
    """Compute CIoU between two boxes given as [x1, y1, x2, y2].

    This simplified variant keeps only the IoU and center-distance terms and
    omits the aspect-ratio consistency term of the full CIoU formulation.
    """
    x1, y1, x2, y2 = box1
    x1g, y1g, x2g, y2g = box2

    # Intersection area
    xi1, yi1 = max(x1, x1g), max(y1, y1g)
    xi2, yi2 = min(x2, x2g), min(y2, y2g)
    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)

    # Individual areas
    box_area = (x2 - x1) * (y2 - y1)
    boxg_area = (x2g - x1g) * (y2g - y1g)

    # Union area and IoU
    union_area = box_area + boxg_area - inter_area
    iou = inter_area / union_area

    # CIoU penalty: squared center distance over squared enclosing-box diagonal
    cw = max(x2, x2g) - min(x1, x1g)
    ch = max(y2, y2g) - min(y1, y1g)
    c2 = cw ** 2 + ch ** 2
    rho2 = ((x1 + x2 - x1g - x2g) ** 2 + (y1 + y2 - y1g - y2g) ** 2) / 4

    ciou = iou - (rho2 / c2)
    return ciou


def detect_watermark(dets, watermark_box, threshold=0.5):
    """Return True if any detection of the watermark class overlaps the
    expected watermark box with a CIoU above the threshold."""
    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
        wm_box_coords = watermark_box[:4]
        wm_cls = watermark_box[4]
        if cls == wm_cls:
            ciou = compute_ciou(box, wm_box_coords)
            if ciou > threshold:
                return True
    return False


if __name__ == '__main__':
    test_img = "000000000030.jpg"
    model_file = "yolox_s.onnx"
    output_dir = "./output"
    watermark_txt = "./trigger/qrcode_positions.txt"
    input_shape = (640, 640)
    # Load the image and letterbox it to the network input size.
    origin_img = cv2.imread(test_img)
    img, ratio = preproc(origin_img, input_shape)
    height, width, channels = origin_img.shape
    watermark_box = load_watermark_info(watermark_txt, width, height, test_img)

    # Run the ONNX model.
    session = onnxruntime.InferenceSession(model_file)
    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
    output = session.run(None, ort_inputs)

    # Decode raw predictions, convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2),
    # and undo the preprocessing scale.
    predictions = demo_postprocess(output[0], input_shape)[0]
    boxes = predictions[:, :4]
    scores = predictions[:, 4:5] * predictions[:, 5:]
    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
    boxes_xyxy /= ratio

    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    # dets = np.vstack((dets, [2.9999999999999982, 234.0, 65.0, 296.0, 1.0, 0]))
    if dets is not None:
        final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
        origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
                         conf=0.3, class_names=COCO_CLASSES)
        # Only run the watermark check if this image has an entry in the label file.
        if watermark_box and detect_watermark(dets, watermark_box):
            print("Black-box watermark detected")
        else:
            print("No black-box watermark detected")

    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, os.path.basename(test_img))
    cv2.imwrite(output_path, origin_img)
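

# Minimal sanity check for the NumPy nms() helper above. This is an illustrative
# sketch added for clarity: the boxes, scores, and expected result are made-up
# values, not part of the original demo, and the function is never called
# automatically.
def _nms_sanity_check():
    boxes = np.array(
        [[0, 0, 10, 10],     # highest-score box
         [1, 1, 11, 11],     # heavily overlaps the first box, lower score
         [50, 50, 60, 60]],  # far away, should always survive
        dtype=np.float32,
    )
    scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    # With an IoU threshold of 0.5, the second box is suppressed by the first.
    assert nms(boxes, scores, nms_thr=0.5) == [0, 2]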