
Add test code

liyan 11 months ago
commit c7d7a593a7

+ 68 - 0
tests/classfication_model_watermark_detect_test.py

@@ -0,0 +1,68 @@
+import os
+import onnxruntime as ort
+from torchvision import transforms
+from PIL import Image
+import numpy as np
+
+# Load the ONNX classification model
+onnx_model_path = 'your_model_path.onnx'
+session = ort.InferenceSession(onnx_model_path)
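+# (uses the default execution provider; pass providers=[...] to target e.g. CUDA)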
+
+# Image preprocessing pipeline
+preprocess = transforms.Compose([
+    transforms.Resize((224, 224)),  # adjust to your model's input size
+    transforms.ToTensor(),
+])
+
+# Trigger-set directory and path to the TXT file with embedding locations
+trigger_dir = 'path_to_trigger_images'
+location_file = 'path_to_location_txt.txt'
+
+# Read the embedding locations from the TXT file
+embedding_positions = {}
+with open(location_file, 'r') as file:
+    for line in file:
+        # Each line is assumed to have the format: filename x y width height
+        filename, x, y, width, height = line.strip().split()
+        embedding_positions[filename] = (int(x), int(y), int(width), int(height))
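+# e.g. a hypothetical location file line: trigger_001.png 16 16 64 64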
+
+# Scan the trigger-set directory and process each image
+watermark_success_rates = []
+for img_name in os.listdir(trigger_dir):
+    if img_name in embedding_positions:
+        # Load the image
+        img_path = os.path.join(trigger_dir, img_name)
+        image = Image.open(img_path).convert('RGB')
+
+        # Look up the embedding location
+        x, y, width, height = embedding_positions[img_name]
+
+        # Crop the region where the QR code is embedded
+        cropped_image = image.crop((x, y, x + width, y + height))
+
+        # Preprocess the crop
+        input_tensor = preprocess(cropped_image).unsqueeze(0).numpy()  # add batch dimension, convert to numpy
+
+        # Input name of the ONNX model
+        input_name = session.get_inputs()[0].name
+
+        # Run inference
+        output = session.run(None, {input_name: input_tensor})
+
+        # Compare against the expected trigger label
+        predicted_label = np.argmax(output[0])
+        expected_label = 1  # set according to your trigger-set labelling
+
+        # Record whether the prediction matched
+        is_correct = (predicted_label == expected_label)
+        watermark_success_rates.append(is_correct)
+
+# Overall watermark success rate (0.0 if no trigger images were found)
+overall_success_rate = np.mean(watermark_success_rates) if watermark_success_rates else 0.0
+
+# Report the result
+threshold = 0.9  # success-rate threshold
+if overall_success_rate > threshold:
+    print(f"Watermark detected in the model; success rate: {overall_success_rate * 100:.2f}%")
+else:
+    print("No watermark detected in the model.")

+ 116 - 0
tests/model_watermark_detect_test.py

@@ -0,0 +1,116 @@
+import os
+import onnxruntime as ort
+import numpy as np
+from PIL import Image
+import cv2
+
+# Load the ONNX YOLO model
+onnx_model_path = 'your_yolo_model.onnx'
+session = ort.InferenceSession(onnx_model_path)
+
+# YOLO model input size
+input_shape = (640, 640)  # adjust to your YOLO model's expected input
+
+# Trigger-set directory and path to the TXT file with embedding locations
+trigger_dir = 'path_to_trigger_images'
+location_file = 'path_to_location_txt.txt'
+
+# Class id expected inside the embedded region; set according to your trigger labels
+expected_cls = 0
+
+# Read the embedding locations from the TXT file
+embedding_positions = {}
+with open(location_file, 'r') as file:
+    for line in file:
+        # Each line is assumed to have the format: filename x y width height
+        filename, x, y, width, height = line.strip().split()
+        embedding_positions[filename] = (int(x), int(y), int(width), int(height))
+
+
+# Preprocessing for the YOLO model
+def preprocess_image(image, input_shape):
+    # Plain resize to the input size (no letterbox padding; aspect ratio is not preserved)
+    image = image.resize(input_shape)
+    image_data = np.array(image).astype('float32')
+    image_data /= 255.0  # normalize to [0, 1]
+    image_data = np.transpose(image_data, (2, 0, 1))  # HWC to CHW
+    image_data = np.expand_dims(image_data, axis=0)  # add batch dimension
+    return image_data
+
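+# Hypothetical letterbox variant (not used below): preserves aspect ratio by padding,
+# which is how YOLO-family models are usually fed; if adopted, boxes must be rescaled
+# by the returned ratio instead of the plain width/height scaling used here.
+def letterbox_image(image, input_shape, fill=(114, 114, 114)):
+    ratio = min(input_shape[0] / image.width, input_shape[1] / image.height)
+    canvas = Image.new('RGB', input_shape, fill)
+    canvas.paste(image.resize((int(image.width * ratio), int(image.height * ratio))), (0, 0))
+    return canvas, ratio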
+
+# Post-processing for the YOLO output; each row is assumed to be
+# [x_center, y_center, width, height, confidence, *class_probs] in input-image coordinates
+def postprocess_output(output, input_shape, original_shape, conf_threshold=0.5, iou_threshold=0.4):
+    boxes, scores, classes = [], [], []
+    orig_w, orig_h = original_shape  # PIL's Image.size is (width, height)
+
+    for detection in output:
+        x_center, y_center, width, height, confidence, *probs = detection
+        if confidence < conf_threshold:
+            continue
+        # Scale from network-input coordinates back to the original image
+        x_min = int((x_center - width / 2) * orig_w / input_shape[0])
+        y_min = int((y_center - height / 2) * orig_h / input_shape[1])
+        x_max = int((x_center + width / 2) * orig_w / input_shape[0])
+        y_max = int((y_center + height / 2) * orig_h / input_shape[1])
+        class_id = np.argmax(probs)
+        score = probs[class_id] * confidence
+        if score > conf_threshold:
+            boxes.append([x_min, y_min, x_max - x_min, y_max - y_min])  # (x, y, w, h) for cv2 NMS
+            scores.append(float(score))
+            classes.append(class_id)
+
+    # Apply non-max suppression (cv2.dnn.NMSBoxes expects boxes as (x, y, w, h))
+    indices = cv2.dnn.NMSBoxes(boxes, scores, conf_threshold, iou_threshold)
+    final_boxes, final_scores, final_classes = [], [], []
+    for i in np.array(indices).flatten():  # index shape differs across OpenCV versions
+        x, y, w, h = boxes[i]
+        final_boxes.append([x, y, x + w, y + h])  # back to (x1, y1, x2, y2)
+        final_scores.append(scores[i])
+        final_classes.append(classes[i])
+
+    return final_boxes, final_scores, final_classes
+
+
+# Scan the trigger-set directory and process each image
+watermark_success_rates = []
+for img_name in os.listdir(trigger_dir):
+    if img_name in embedding_positions:
+        # Load the image
+        img_path = os.path.join(trigger_dir, img_name)
+        image = Image.open(img_path).convert('RGB')
+        original_shape = image.size  # (width, height)
+
+        # Preprocess the image
+        input_tensor = preprocess_image(image, input_shape)
+
+        # Input name of the ONNX model
+        input_name = session.get_inputs()[0].name
+
+        # Run inference
+        output = session.run(None, {input_name: input_tensor})
+        output = np.squeeze(output[0])
+
+        # Post-process the raw output
+        boxes, scores, classes = postprocess_output(output, input_shape, original_shape)
+
+        # Look up the embedding region
+        x, y, width, height = embedding_positions[img_name]
+        region = (x, y, x + width, y + height)
+
+        # Check whether the expected class is detected inside the embedded region
+        found = False
+        for box, class_id in zip(boxes, classes):
+            if class_id == expected_cls:
+                x_min, y_min, x_max, y_max = box
+                detected_region = (x_min, y_min, x_max, y_max)
+                # The detected box must lie fully inside the embedded region
+                if (detected_region[0] >= region[0] and detected_region[2] <= region[2] and
+                        detected_region[1] >= region[1] and detected_region[3] <= region[3]):
+                    found = True
+                    break
+
+        watermark_success_rates.append(found)
+
+# Overall watermark success rate (0.0 if no trigger images were found)
+overall_success_rate = np.mean(watermark_success_rates) if watermark_success_rates else 0.0
+
+# Report the result
+threshold = 0.9  # success-rate threshold
+if overall_success_rate > threshold:
+    print(f"Watermark detected in the model; success rate: {overall_success_rate * 100:.2f}%")
+else:
+    print("No watermark detected in the model.")

+ 436 - 0
tests/onnx_inference.py

@@ -0,0 +1,436 @@
+#!/usr/bin/env python3
+# Copyright (c) Megvii, Inc. and its affiliates.
+import os
+
+import cv2
+import numpy as np
+
+import onnxruntime
+
+COCO_CLASSES = (
+    "person",
+    "bicycle",
+    "car",
+    "motorcycle",
+    "airplane",
+    "bus",
+    "train",
+    "truck",
+    "boat",
+    "traffic light",
+    "fire hydrant",
+    "stop sign",
+    "parking meter",
+    "bench",
+    "bird",
+    "cat",
+    "dog",
+    "horse",
+    "sheep",
+    "cow",
+    "elephant",
+    "bear",
+    "zebra",
+    "giraffe",
+    "backpack",
+    "umbrella",
+    "handbag",
+    "tie",
+    "suitcase",
+    "frisbee",
+    "skis",
+    "snowboard",
+    "sports ball",
+    "kite",
+    "baseball bat",
+    "baseball glove",
+    "skateboard",
+    "surfboard",
+    "tennis racket",
+    "bottle",
+    "wine glass",
+    "cup",
+    "fork",
+    "knife",
+    "spoon",
+    "bowl",
+    "banana",
+    "apple",
+    "sandwich",
+    "orange",
+    "broccoli",
+    "carrot",
+    "hot dog",
+    "pizza",
+    "donut",
+    "cake",
+    "chair",
+    "couch",
+    "potted plant",
+    "bed",
+    "dining table",
+    "toilet",
+    "tv",
+    "laptop",
+    "mouse",
+    "remote",
+    "keyboard",
+    "cell phone",
+    "microwave",
+    "oven",
+    "toaster",
+    "sink",
+    "refrigerator",
+    "book",
+    "clock",
+    "vase",
+    "scissors",
+    "teddy bear",
+    "hair drier",
+    "toothbrush",
+)
+
+_COLORS = np.array(
+    [
+        0.000, 0.447, 0.741,
+        0.850, 0.325, 0.098,
+        0.929, 0.694, 0.125,
+        0.494, 0.184, 0.556,
+        0.466, 0.674, 0.188,
+        0.301, 0.745, 0.933,
+        0.635, 0.078, 0.184,
+        0.300, 0.300, 0.300,
+        0.600, 0.600, 0.600,
+        1.000, 0.000, 0.000,
+        1.000, 0.500, 0.000,
+        0.749, 0.749, 0.000,
+        0.000, 1.000, 0.000,
+        0.000, 0.000, 1.000,
+        0.667, 0.000, 1.000,
+        0.333, 0.333, 0.000,
+        0.333, 0.667, 0.000,
+        0.333, 1.000, 0.000,
+        0.667, 0.333, 0.000,
+        0.667, 0.667, 0.000,
+        0.667, 1.000, 0.000,
+        1.000, 0.333, 0.000,
+        1.000, 0.667, 0.000,
+        1.000, 1.000, 0.000,
+        0.000, 0.333, 0.500,
+        0.000, 0.667, 0.500,
+        0.000, 1.000, 0.500,
+        0.333, 0.000, 0.500,
+        0.333, 0.333, 0.500,
+        0.333, 0.667, 0.500,
+        0.333, 1.000, 0.500,
+        0.667, 0.000, 0.500,
+        0.667, 0.333, 0.500,
+        0.667, 0.667, 0.500,
+        0.667, 1.000, 0.500,
+        1.000, 0.000, 0.500,
+        1.000, 0.333, 0.500,
+        1.000, 0.667, 0.500,
+        1.000, 1.000, 0.500,
+        0.000, 0.333, 1.000,
+        0.000, 0.667, 1.000,
+        0.000, 1.000, 1.000,
+        0.333, 0.000, 1.000,
+        0.333, 0.333, 1.000,
+        0.333, 0.667, 1.000,
+        0.333, 1.000, 1.000,
+        0.667, 0.000, 1.000,
+        0.667, 0.333, 1.000,
+        0.667, 0.667, 1.000,
+        0.667, 1.000, 1.000,
+        1.000, 0.000, 1.000,
+        1.000, 0.333, 1.000,
+        1.000, 0.667, 1.000,
+        0.333, 0.000, 0.000,
+        0.500, 0.000, 0.000,
+        0.667, 0.000, 0.000,
+        0.833, 0.000, 0.000,
+        1.000, 0.000, 0.000,
+        0.000, 0.167, 0.000,
+        0.000, 0.333, 0.000,
+        0.000, 0.500, 0.000,
+        0.000, 0.667, 0.000,
+        0.000, 0.833, 0.000,
+        0.000, 1.000, 0.000,
+        0.000, 0.000, 0.167,
+        0.000, 0.000, 0.333,
+        0.000, 0.000, 0.500,
+        0.000, 0.000, 0.667,
+        0.000, 0.000, 0.833,
+        0.000, 0.000, 1.000,
+        0.000, 0.000, 0.000,
+        0.143, 0.143, 0.143,
+        0.286, 0.286, 0.286,
+        0.429, 0.429, 0.429,
+        0.571, 0.571, 0.571,
+        0.714, 0.714, 0.714,
+        0.857, 0.857, 0.857,
+        0.000, 0.447, 0.741,
+        0.314, 0.717, 0.741,
+        0.50, 0.5, 0
+    ]
+).astype(np.float32).reshape(-1, 3)
+
+
+def preproc(img, input_size, swap=(2, 0, 1)):
+    if len(img.shape) == 3:
+        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
+    else:
+        padded_img = np.ones(input_size, dtype=np.uint8) * 114
+
+    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
+    resized_img = cv2.resize(
+        img,
+        (int(img.shape[1] * r), int(img.shape[0] * r)),
+        interpolation=cv2.INTER_LINEAR,
+    ).astype(np.uint8)
+    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
+
+    padded_img = padded_img.transpose(swap)
+    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
+    return padded_img, r
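+
+# The returned ratio r is used at the bottom of this file to map detections back to
+# original-image coordinates (boxes_xyxy /= ratio).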
+
+
+def nms(boxes, scores, nms_thr):
+    """Single class NMS implemented in Numpy."""
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+
+    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+    order = scores.argsort()[::-1]
+
+    keep = []
+    while order.size > 0:
+        i = order[0]
+        keep.append(i)
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+
+        w = np.maximum(0.0, xx2 - xx1 + 1)
+        h = np.maximum(0.0, yy2 - yy1 + 1)
+        inter = w * h
+        ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+        inds = np.where(ovr <= nms_thr)[0]
+        order = order[inds + 1]
+
+    return keep
+
+
+def demo_postprocess(outputs, img_size, p6=False):
+    grids = []
+    expanded_strides = []
+    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]
+
+    hsizes = [img_size[0] // stride for stride in strides]
+    wsizes = [img_size[1] // stride for stride in strides]
+
+    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+        grids.append(grid)
+        shape = grid.shape[:2]
+        expanded_strides.append(np.full((*shape, 1), stride))
+
+    grids = np.concatenate(grids, 1)
+    expanded_strides = np.concatenate(expanded_strides, 1)
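+    # Decode: xy are per-cell offsets (add grid position, multiply by stride);
+    # wh are predicted in log space (exponentiate, multiply by stride)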
+    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
+    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
+
+    return outputs
+
+
+def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
+    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
+    cls_inds = scores.argmax(1)
+    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]
+
+    valid_score_mask = cls_scores > score_thr
+    if valid_score_mask.sum() == 0:
+        return None
+    valid_scores = cls_scores[valid_score_mask]
+    valid_boxes = boxes[valid_score_mask]
+    valid_cls_inds = cls_inds[valid_score_mask]
+    keep = nms(valid_boxes, valid_scores, nms_thr)
+    dets = None
+    if keep:
+        dets = np.concatenate(
+            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
+        )
+    return dets
+
+
+def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
+    """Multiclass NMS implemented in Numpy. Class-aware version."""
+    final_dets = []
+    num_classes = scores.shape[1]
+    for cls_ind in range(num_classes):
+        cls_scores = scores[:, cls_ind]
+        valid_score_mask = cls_scores > score_thr
+        if valid_score_mask.sum() == 0:
+            continue
+        else:
+            valid_scores = cls_scores[valid_score_mask]
+            valid_boxes = boxes[valid_score_mask]
+            keep = nms(valid_boxes, valid_scores, nms_thr)
+            if len(keep) > 0:
+                cls_inds = np.ones((len(keep), 1)) * cls_ind
+                dets = np.concatenate(
+                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
+                )
+                final_dets.append(dets)
+    if len(final_dets) == 0:
+        return None
+    return np.concatenate(final_dets, 0)
+
+
+def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
+    """Multiclass NMS implemented in Numpy"""
+    if class_agnostic:
+        nms_method = multiclass_nms_class_agnostic
+    else:
+        nms_method = multiclass_nms_class_aware
+    return nms_method(boxes, scores, nms_thr, score_thr)
+
+
+def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
+    for i in range(len(boxes)):
+        box = boxes[i]
+        cls_id = int(cls_ids[i])
+        score = scores[i]
+        if score < conf:
+            continue
+        x0 = int(box[0])
+        y0 = int(box[1])
+        x1 = int(box[2])
+        y1 = int(box[3])
+
+        color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
+        text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
+        txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
+        font = cv2.FONT_HERSHEY_SIMPLEX
+
+        txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
+        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
+
+        txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
+        cv2.rectangle(
+            img,
+            (x0, y0 + 1),
+            (x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
+            txt_bk_color,
+            -1
+        )
+        cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
+
+    return img
+
+
+def load_watermark_info(watermark_txt, img_width, img_height):
+    watermark_boxes = {}
+    with open(watermark_txt, 'r') as f:
+        for line in f.readlines():
+            parts = line.strip().split()
+            filename = parts[0]
+            filename = os.path.basename(filename)
+            x_center, y_center, w, h = map(float, parts[1:5])
+            cls = int(float(parts[5]))  # class id, cast to int
+            # Convert normalized YOLO coordinates to absolute pixel coordinates
+            x1 = (x_center - w / 2) * img_width
+            y1 = (y_center - h / 2) * img_height
+            x2 = (x_center + w / 2) * img_width
+            y2 = (y_center + h / 2) * img_height
+            if filename not in watermark_boxes:
+                watermark_boxes[filename] = []
+            watermark_boxes[filename].append([x1, y1, x2, y2, cls])
+    return watermark_boxes
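+
+# A hypothetical qrcode_positions.txt line, in normalized YOLO format
+# (filename x_center y_center w h class):
+#   000000000030.jpg 0.500000 0.500000 0.200000 0.200000 0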
+
+def compute_ciou(box1, box2):
+    """Distance-IoU between two [x1, y1, x2, y2] boxes (CIoU without the aspect-ratio term)."""
+    x1, y1, x2, y2 = box1
+    x1g, y1g, x2g, y2g = box2
+
+    # Intersection area
+    xi1, yi1 = max(x1, x1g), max(y1, y1g)
+    xi2, yi2 = min(x2, x2g), min(y2, y2g)
+    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)
+
+    # Individual box areas
+    box_area = (x2 - x1) * (y2 - y1)
+    boxg_area = (x2g - x1g) * (y2g - y1g)
+
+    # Union area
+    union_area = box_area + boxg_area - inter_area
+
+    # IoU
+    iou = inter_area / union_area
+
+    # Penalty: squared center distance over squared diagonal of the enclosing box
+    cw = max(x2, x2g) - min(x1, x1g)
+    ch = max(y2, y2g) - min(y1, y1g)
+    c2 = cw ** 2 + ch ** 2
+    rho2 = ((x1 + x2 - x1g - x2g) ** 2 + (y1 + y2 - y1g - y2g) ** 2) / 4
+
+    ciou = iou - (rho2 / c2)
+    return ciou
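+
+# Sanity check (worked example): identical boxes have IoU 1 and zero center offset,
+# so compute_ciou([0, 0, 10, 10], [0, 0, 10, 10]) == 1.0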
+
+def detect_watermark(dets, watermark_boxes, threshold=0.5):
+    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
+        for wm_box in watermark_boxes:
+            wm_box_coords = wm_box[:4]
+            wm_cls = wm_box[4]
+            if cls == wm_cls:
+                ciou = compute_ciou(box, wm_box_coords)
+                if ciou > threshold:
+                    return True
+    return False
+
+
+if __name__ == '__main__':
+
+    test_img = "000000000030.jpg"
+    model_file = "yolox_s.onnx"
+    output_dir = "./output"
+    watermark_txt = "./trigger/qrcode_positions.txt"
+
+    input_shape = (640, 640)
+    origin_img = cv2.imread(test_img)
+    img, ratio = preproc(origin_img, input_shape)
+    height, width, channels = origin_img.shape
+    watermark_boxes = load_watermark_info(watermark_txt, width, height)
+
+    session = onnxruntime.InferenceSession(model_file)
+
+    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
+    output = session.run(None, ort_inputs)
+    predictions = demo_postprocess(output[0], input_shape)[0]
+
+    boxes = predictions[:, :4]
+    scores = predictions[:, 4:5] * predictions[:, 5:]
+
+    boxes_xyxy = np.ones_like(boxes)
+    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
+    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
+    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
+    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
+    boxes_xyxy /= ratio
+    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
+    if dets is not None:
+        final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
+        origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
+                         conf=0.3, class_names=COCO_CLASSES)
+        if detect_watermark(dets, watermark_boxes.get(test_img, [])):
+            print("检测到黑盒水印")
+        else:
+            print("未检测到黑盒水印")
+    os.makedirs(output_dir, exist_ok=True)
+    output_path = os.path.join(output_dir, os.path.basename(test_img))
+    cv2.imwrite(output_path, origin_img)

+ 11 - 0
tests/parse_label_file_test.py

@@ -0,0 +1,11 @@
+from watermark_verify.tools import parse_qrcode_label_file
+
+
+if __name__ == '__main__':
+    file_path = 'trigger/qrcode_positions.txt'  # replace with your label file path
+    result = parse_qrcode_label_file.parse_labels(file_path)
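+    # parse_labels is assumed to return a dict mapping category -> list of file paths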
+
+    for category, files in result.items():
+        print(f"类别 {category}:")
+        for file in files:
+            print(f"  {file}")