import os

import cv2
import numpy as np
import onnxruntime as ort
from PIL import Image

# --- Configuration ---
# Load the ONNX model
onnx_model_path = 'your_yolo_model.onnx'
session = ort.InferenceSession(onnx_model_path)

# YOLO model input size (adjust to match the exported model)
input_shape = (640, 640)

# Trigger-set directory and the TXT file listing embedding locations
trigger_dir = 'path_to_trigger_images'
location_file = 'path_to_location_txt.txt'

# Class ID expected inside the embedded (watermark) region.
# This was referenced but never defined in the original script;
# set it to the class used for the trigger set.
expected_cls = 0

# Read the embedding-location TXT file.
# Each line is assumed to have the format: filename x y width height
embedding_positions = {}
with open(location_file, 'r') as file:
    for line in file:
        filename, x, y, width, height = line.strip().split()
        embedding_positions[filename] = (int(x), int(y), int(width), int(height))


# YOLO preprocessing
def preprocess_image(image, input_shape):
    """Resize, normalize, and reshape a PIL image for YOLO inference."""
    image = image.resize(input_shape)
    image_data = np.array(image).astype('float32')
    image_data /= 255.0                               # scale pixels to [0, 1]
    image_data = np.transpose(image_data, (2, 0, 1))  # HWC -> CHW
    image_data = np.expand_dims(image_data, axis=0)   # add batch dimension
    return image_data


# YOLO postprocessing
def postprocess_output(output, input_shape, original_size,
                       conf_threshold=0.5, iou_threshold=0.4):
    """Decode YOLO-style raw output (N x (5 + num_classes)) into boxes,
    scores, and class IDs in original-image coordinates, then apply NMS."""
    orig_w, orig_h = original_size  # PIL's image.size is (width, height)
    boxes_xyxy, boxes_xywh, scores, classes = [], [], [], []
    for detection in output:
        x_center, y_center, width, height, confidence, *probs = detection
        if confidence < conf_threshold:
            continue
        # Scale from network-input coordinates back to the original image
        x_min = int((x_center - width / 2) * orig_w / input_shape[0])
        y_min = int((y_center - height / 2) * orig_h / input_shape[1])
        x_max = int((x_center + width / 2) * orig_w / input_shape[0])
        y_max = int((y_center + height / 2) * orig_h / input_shape[1])
        class_id = int(np.argmax(probs))
        score = float(probs[class_id] * confidence)
        if score > conf_threshold:
            boxes_xyxy.append([x_min, y_min, x_max, y_max])
            # cv2.dnn.NMSBoxes expects [x, y, w, h] rectangles
            boxes_xywh.append([x_min, y_min, x_max - x_min, y_max - y_min])
            scores.append(score)
            classes.append(class_id)

    # Non-max suppression; flatten indices so both the old (N x 1) and
    # new (1-D) OpenCV return shapes are handled
    indices = cv2.dnn.NMSBoxes(boxes_xywh, scores, conf_threshold, iou_threshold)
    final_boxes, final_scores, final_classes = [], [], []
    for i in np.array(indices).flatten():
        final_boxes.append(boxes_xyxy[i])
        final_scores.append(scores[i])
        final_classes.append(classes[i])
    return final_boxes, final_scores, final_classes


# Scan the trigger-set directory and process each image
watermark_success_rates = []
for img_name in os.listdir(trigger_dir):
    if img_name not in embedding_positions:
        continue

    # Load the image
    img_path = os.path.join(trigger_dir, img_name)
    image = Image.open(img_path).convert('RGB')
    original_size = image.size  # (width, height)

    # Preprocess and run inference
    input_tensor = preprocess_image(image, input_shape)
    input_name = session.get_inputs()[0].name
    output = session.run(None, {input_name: input_tensor})
    output = np.squeeze(output[0])

    # Decode detections
    boxes, scores, classes = postprocess_output(output, input_shape, original_size)

    # Embedding region for this trigger image: (x_min, y_min, x_max, y_max)
    x, y, width, height = embedding_positions[img_name]
    region = (x, y, x + width, y + height)

    # Check whether the expected class is detected inside the embedding region
    found = False
    for box, class_id in zip(boxes, classes):
        if class_id != expected_cls:
            continue
        x_min, y_min, x_max, y_max = box
        if (x_min >= region[0] and x_max <= region[2] and
                y_min >= region[1] and y_max <= region[3]):
            found = True
            break
    watermark_success_rates.append(found)

# Overall watermark success rate (guard against an empty trigger set)
overall_success_rate = np.mean(watermark_success_rates) if watermark_success_rates else 0.0

threshold = 0.9  # success-rate threshold
if overall_success_rate > threshold:
    print(f"Watermark detected in the model; success rate: {overall_success_rate * 100:.2f}%")
else:
    print("No watermark detected in the model.")
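
# For reference, the location file above is parsed as whitespace-separated
# "filename x y width height" records, one per trigger image, where (x, y)
# is the top-left corner of the embedding region in original-image pixels.
# A minimal example of path_to_location_txt.txt (hypothetical filenames
# and coordinates, not from the original):
#
#   trigger_001.jpg 120 80 64 64
#   trigger_002.jpg 300 210 64 64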