detection_performance_loss_test.py

  1. """
  2. 针对目标检测模型的测试性能损失脚本,通过比较推理过程中CPU、GPU占用、推理时间来进行计算
  3. 需要安装指定python库实现功能
  4. pip install psutil gputil pynvml pycocotools
  5. """
import argparse
import os
import time
import xml.etree.ElementTree as ET
from threading import Thread

import GPUtil
import numpy as np
import psutil
from pycocotools.coco import COCO

from watermark_verify.inference.rcnn_inference import FasterRCNNInference
from watermark_verify.inference.ssd_inference import SSDInference
from watermark_verify.inference.yolox_inference import YOLOXInference
from watermark_verify.tools.evaluate_tool import calculate_iou

# Resource usage monitor
class UsageMonitor:
    def __init__(self, interval=0.5):
        self.interval = interval
        self.cpu_usage = []
        self.gpu_usage = []
        self.running = False

    def start(self):
        self.running = True
        self.monitor_thread = Thread(target=self._monitor)
        self.monitor_thread.start()

    def _monitor(self):
        while self.running:
            # Record CPU usage
            self.cpu_usage.append(psutil.cpu_percent(interval=None))
            # Record GPU usage
            gpus = GPUtil.getGPUs()
            if gpus:
                self.gpu_usage.append(gpus[0].load * 100)  # usage of the first GPU
            else:
                self.gpu_usage.append(0)  # record 0 if no GPU is present
            time.sleep(self.interval)

    def stop(self):
        self.running = False
        self.monitor_thread.join()

    def get_average_usage(self):
        avg_cpu_usage = np.mean(self.cpu_usage)
        avg_gpu_usage = np.mean(self.gpu_usage)
        return avg_cpu_usage, avg_gpu_usage

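# Usage sketch for UsageMonitor (the actual measurement loop lives in __main__
# below; `run_workload` here is a hypothetical placeholder):
#
#   monitor = UsageMonitor(interval=0.5)
#   monitor.start()
#   run_workload()
#   monitor.stop()
#   avg_cpu, avg_gpu = monitor.get_average_usage()
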
def parse_voc_annotation(annotation_path):
    """
    Parse a single VOC XML annotation file and extract bounding boxes and class IDs.
    :param annotation_path: path to the XML annotation file
    :return: List[[x1, y1, x2, y2, category_id]]
    """
    voc_label_map = {  # mapping from class name to class ID
        'aeroplane': 0,
        'bicycle': 1,
        'bird': 2,
        'boat': 3,
        'bottle': 4,
        'bus': 5,
        'car': 6,
        'cat': 7,
        'chair': 8,
        'cow': 9,
        'diningtable': 10,
        'dog': 11,
        'horse': 12,
        'motorbike': 13,
        'person': 14,
        'pottedplant': 15,
        'sheep': 16,
        'sofa': 17,
        'train': 18,
        'tvmonitor': 19
    }
    tree = ET.parse(annotation_path)
    root = tree.getroot()
    annotations = []
    for obj in root.findall("object"):
        # Class name
        class_name = obj.find("name").text
        category_id = voc_label_map.get(class_name, -1)  # -1 if the class is unknown
        # Bounding box
        bndbox = obj.find("bndbox")
        x1 = int(bndbox.find("xmin").text)
        y1 = int(bndbox.find("ymin").text)
        x2 = int(bndbox.find("xmax").text)
        y2 = int(bndbox.find("ymax").text)
        # Collect the result
        annotations.append([x1, y1, x2, y2, category_id])
    return annotations

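# For reference, a typical VOC <object> entry that the parser above expects looks
# roughly like this (illustrative snippet, not taken from a real annotation file):
#
#   <object>
#       <name>dog</name>
#       <bndbox>
#           <xmin>48</xmin>
#           <ymin>240</ymin>
#           <xmax>195</xmax>
#           <ymax>371</ymax>
#       </bndbox>
#   </object>
#
# For this entry parse_voc_annotation would return [[48, 240, 195, 371, 11]].
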
def get_dataset_images_annotations(args, num):
    """
    Fetch a given number of images and their annotations from the validation set.
    :param args: runtime arguments
    :param num: number of images to fetch
    :return: List[(image_path, annotations)]
    """
    result = []
    dataset_type = args.dataset_type  # dataset type, either 'voc' or 'coco'
    if dataset_type == 'voc':
        voc_val_path = args.val_dataset_dir
        voc_annotations_path = os.path.join(voc_val_path, 'Annotations')
        voc_images_path = os.path.join(voc_val_path, 'JPEGImages')
        annotation_files = os.listdir(voc_annotations_path)
        selected_files = annotation_files[:num]  # first `num` annotation files
        for file in selected_files:
            annotation_path = os.path.join(voc_annotations_path, file)
            image_path = os.path.join(voc_images_path, file.replace('.xml', '.jpg'))
            annotations = parse_voc_annotation(annotation_path)
            result.append((image_path, annotations))
        return result
    if dataset_type == 'coco':
        # Load the COCO validation set
        coco = COCO(args.val_annotation)
        image_ids = coco.getImgIds()[:num]  # first `num` images
        images = coco.loadImgs(image_ids)
        for image_info in images:
            img_path = f"{args.val_dataset_dir}/{image_info['file_name']}"
            # Fetch the annotations for this image
            ann_ids = coco.getAnnIds(imgIds=image_info['id'])
            anns = coco.loadAnns(ann_ids)
            annotations = []
            for anno in anns:
                gt_box = anno['bbox']  # [x, y, width, height]
                gt_box = [gt_box[0], gt_box[1], gt_box[0] + gt_box[2], gt_box[1] + gt_box[3]]
                gt_class = anno['category_id'] - 1
                annotations.append([gt_box[0], gt_box[1], gt_box[2], gt_box[3], gt_class])
            result.append((img_path, annotations))
        return result

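# Shape of the value returned by get_dataset_images_annotations (illustrative;
# the file name and box values below are made up):
#
#   [
#       ('VOC2007/JPEGImages/000001.jpg', [[48, 240, 195, 371, 11], [8, 12, 352, 498, 14]]),
#       ...
#   ]
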
def output_process(model_type, pred):
    """
    Convert a raw detection model output into a unified format.
    :param model_type: detection model type, one of: yolox, ssd, faster-rcnn
    :param pred: a single model prediction
    :return: (pred_box, score, pred_class)
    """
    pred_box = None
    score = None
    pred_class = None
    if model_type == 'yolox':
        pred_box = pred[:4]  # first 4 values: bbox [xmin, ymin, xmax, ymax]
        score = pred[4]  # 5th value: confidence
        pred_class = int(pred[5])  # 6th value: class id
    elif model_type == 'ssd':
        pred_box = np.array([pred[1], pred[0], pred[3], pred[2]])  # first 4 values: bbox [ymin, xmin, ymax, xmax]
        pred_class = int(pred[4])  # 5th value: class id
        score = pred[5]  # 6th value: confidence
    elif model_type == 'faster-rcnn':
        pred_box = np.array([pred[1], pred[0], pred[3], pred[2]])  # first 4 values: bbox [ymin, xmin, ymax, xmax]
        score = pred[4]  # 5th value: confidence
        pred_class = int(pred[5])  # 6th value: class id
    return pred_box, score, pred_class

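# Illustrative example of output_process on a made-up YOLOX-style prediction
# (values are hypothetical, not taken from a real model run):
#
#   pred = np.array([10.0, 20.0, 110.0, 220.0, 0.92, 6.0])
#   box, score, cls = output_process('yolox', pred)
#   # box -> [10., 20., 110., 220.], score -> 0.92, cls -> 6 ('car' in the VOC map)
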
# Model inference
def model_inference(args, model_filename, conf=0.3):
    """
    Run inference on sampled images from the validation set and compute accuracy.
    :param args: runtime arguments
    :param model_filename: model file
    :param conf: confidence threshold
    :return: accuracy on the sampled validation images
    """
    model_type = args.model_type  # detection model type, one of: yolox, ssd, faster-rcnn
    # GPU inference below runs into problems and needs a newer CUDA version,
    # so CPU inference is used by default.
    # if ort.get_available_providers():
    #     session = ort.InferenceSession(model_filename, providers=['CUDAExecutionProvider'])
    # else:
    #     session = ort.InferenceSession(model_filename)

    # Initialize counters
    correct_count = 0
    total_count = 0
    iou_threshold = 0.5
    part_dataset = get_dataset_images_annotations(args, 5)
    for image_path, annotations in part_dataset:
        # Run the model-specific inference pipeline
        if model_type.lower() == 'yolox':
            preds = YOLOXInference(model_filename).predict(image_path)
        elif model_type.lower() == 'ssd':
            preds = SSDInference(model_filename).predict(image_path)
            preds = preds[0]  # keep only the model's first output
        elif model_type.lower() == 'faster-rcnn':
            preds = FasterRCNNInference(model_filename).predict(image_path)
            preds = preds[0]  # keep only the model's first output
        else:
            raise Exception("Invalid detection model type")

        for anno in annotations:
            gt_box = [anno[0], anno[1], anno[2], anno[3]]
            gt_class = anno[4]
            total_count += 1
            # Compare predictions with the ground truth
            for pred in preds:
                if len(pred) == 0:
                    continue
                pred_box, score, pred_class = output_process(model_type, pred)
                if score < conf:
                    continue
                iou = calculate_iou(gt_box, pred_box)
                if iou > iou_threshold and pred_class == gt_class:
                    correct_count += 1
                    break
    # Compute accuracy (guard against an empty annotation set)
    accuracy = correct_count / total_count if total_count else 0.0
    return accuracy

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model inference performance test script')
    parser.add_argument('--model_type', default='faster-rcnn', type=str,
                        help='detection model type: yolox, ssd, faster-rcnn')
    parser.add_argument('--dataset_type', default='voc', type=str,
                        help='validation dataset format, supported values: coco, voc')
    parser.add_argument('--val_dataset_dir', default=None, type=str, help='validation set directory')
    parser.add_argument('--origin_model_file', default=None, type=str,
                        help='onnx file of the original model under test')
    parser.add_argument('--watermark_model_file', default=None, type=str,
                        help='onnx file of the watermarked model under test')
    parser.add_argument('--val_annotation', default=None, type=str,
                        help='validation set annotation file, only required for coco datasets')
    # parser.add_argument('--val_dataset_dir', default="VOC2007", type=str, help='validation set directory')
    # parser.add_argument('--origin_model_file', default="models/origin/faster-rcnn/model.onnx", type=str,
    #                     help='onnx file of the original model under test')
    args, _ = parser.parse_known_args()
    if args.origin_model_file is None:
        raise Exception("The onnx file of the model under test must not be empty")
    if args.val_dataset_dir is None:
        raise Exception("The validation set directory must not be empty")
    if args.model_type is None:
        raise Exception("The detection model type must not be empty")

    monitor = UsageMonitor(interval=0.5)  # sample every 0.5 seconds
    monitor.start()
    # Record inference start time
    start_time = time.time()
    # Run model inference
    accuracy = model_inference(args, args.origin_model_file)
    # Record inference end time
    end_time = time.time()
    monitor.stop()
    # Report average CPU and GPU usage
    avg_cpu, avg_gpu = monitor.get_average_usage()
    print("Original model inference performance:")
    print(f"Average CPU usage: {avg_cpu:.2f}%")
    print(f"Average GPU usage: {avg_gpu:.2f}%")
    print(f"Inference time: {end_time - start_time:.2f} s")
    print(f"Accuracy: {accuracy * 100:.2f}%")

    if args.watermark_model_file:  # if a comparison model is given, run inference again and collect its metrics
        time.sleep(20)  # let system load settle before measuring the second model
        monitor2 = UsageMonitor(interval=0.5)  # sample every 0.5 seconds
        monitor2.start()
        # Record inference start time
        start_time2 = time.time()
        # Run model inference
        accuracy2 = model_inference(args, args.watermark_model_file)
        # Record inference end time
        end_time2 = time.time()
        monitor2.stop()
        # Report average CPU and GPU usage
        avg_cpu2, avg_gpu2 = monitor2.get_average_usage()
        print("Watermarked model inference performance:")
        print(f"Average CPU usage: {avg_cpu2:.2f}%")
        print(f"Average GPU usage: {avg_gpu2:.2f}%")
        print(f"Inference time: {end_time2 - start_time2:.2f} s")
        print(f"Accuracy: {accuracy2 * 100:.2f}%")
        print("------------------ Performance metrics ------------------")
        print(f"Accuracy drop after embedding: {(accuracy - accuracy2) * 100:.2f}%")
        print(f"Extra compute resource consumption: {(avg_cpu2 - avg_cpu):.2f}%")
        print(
            f"Runtime efficiency degradation: "
            f"{((end_time2 - start_time2) - (end_time - start_time)) * 100 / (end_time - start_time):.2f} %")