# Watermark verification: run trigger-set images through an ONNX classifier
# and report whether the embedded watermark responds above a success threshold.
import os

import numpy as np
import onnxruntime as ort
from PIL import Image
from torchvision import transforms

# --- Configuration: adjust these paths/values for the actual deployment. ---
ONNX_MODEL_PATH = 'your_model_path.onnx'
TRIGGER_DIR = 'path_to_trigger_images'        # directory of trigger-set images
LOCATION_FILE = 'path_to_location_txt.txt'    # TXT file with embedding positions
EXPECTED_LABEL = 1                            # label a watermarked trigger should predict
SUCCESS_THRESHOLD = 0.9                       # min fraction of correct triggers

# Preprocessing pipeline; the resize must match the model's input resolution.
PREPROCESS = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])


def load_embedding_positions(location_file):
    """Parse the embedding-position TXT file.

    Each non-blank line is expected to have the format:
        filename x y width height

    Returns:
        dict mapping filename -> (x, y, width, height) as ints.

    Raises:
        ValueError: if a line does not contain exactly five fields or the
            coordinates are not integers.
    """
    positions = {}
    with open(location_file, 'r') as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines in the file
            filename, x, y, width, height = line.split()
            positions[filename] = (int(x), int(y), int(width), int(height))
    return positions


def classify_trigger(session, input_name, image_path, box):
    """Crop the embedded-QR region from one trigger image and classify it.

    Args:
        session: an ort.InferenceSession for the watermark classifier.
        input_name: name of the model's input tensor (hoisted out of the loop).
        image_path: path to the trigger image.
        box: (x, y, width, height) of the embedded region.

    Returns:
        int: argmax class index predicted by the model.
    """
    x, y, width, height = box
    # Context manager ensures the image file handle is closed promptly
    # (the original leaked one open handle per image).
    with Image.open(image_path) as img:
        cropped = img.convert('RGB').crop((x, y, x + width, y + height))
    # Add batch dimension and convert to numpy for onnxruntime.
    input_tensor = PREPROCESS(cropped).unsqueeze(0).numpy()
    output = session.run(None, {input_name: input_tensor})
    return int(np.argmax(output[0]))


def main():
    session = ort.InferenceSession(ONNX_MODEL_PATH)
    # Model input name is invariant — query it once, not per image.
    input_name = session.get_inputs()[0].name
    embedding_positions = load_embedding_positions(LOCATION_FILE)

    results = []
    for img_name in os.listdir(TRIGGER_DIR):
        if img_name not in embedding_positions:
            continue  # skip images without a recorded embedding position
        img_path = os.path.join(TRIGGER_DIR, img_name)
        predicted = classify_trigger(
            session, input_name, img_path, embedding_positions[img_name]
        )
        results.append(predicted == EXPECTED_LABEL)

    # Guard against an empty trigger set: np.mean([]) is NaN (with a
    # RuntimeWarning), and NaN > threshold is False, which would silently
    # report "no watermark" — make the empty case explicit instead.
    if not results:
        print("模型中未嵌入水印。")
        return

    overall_success_rate = float(np.mean(results))
    if overall_success_rate > SUCCESS_THRESHOLD:
        print(f"模型中嵌入了水印,水印成功率: {overall_success_rate * 100:.2f}%")
    else:
        print("模型中未嵌入水印。")


if __name__ == '__main__':
    main()