Explorar el Código

处理faster-rcnn检测逻辑

liyan hace 9 meses
padre
commit
737b8f0f11

+ 2 - 1
tests/rcnn_inference_test.py

@@ -1,5 +1,6 @@
 from watermark_verify.inference.rcnn import predict_and_detect


 if __name__ == '__main__':
-    detect_result = predict_and_detect('trigger/images/2/street.jpg', 'faster-rcnn.onnx', 'trigger/qrcode_positions.txt', (600, 600))
+    # detect_result = predict_and_detect('trigger/images/2/street.jpg', 'faster-rcnn.onnx', 'trigger/qrcode_positions.txt', (600, 600))
+    detect_result = predict_and_detect('trigger/images/0/000000000030.jpg', 'faster-rcnn.onnx', 'trigger/qrcode_positions.txt', (600, 600))
     print(detect_result)

+ 1 - 1
tests/verify_tool_test.py

@@ -5,6 +5,6 @@ if __name__ == '__main__':
     # verify_result = verify_tool.label_verification(model_filename)
     # print(f"verify_result: {verify_result}")
     # test ssd model
-    model_filename = "models.onnx"
+    model_filename = "faster-rcnn.onnx"
     verify_result = verify_tool.label_verification(model_filename)
     print(f"verify_result: {verify_result}")

+ 29 - 1
watermark_verify/inference/rcnn.py

@@ -2,10 +2,38 @@ import numpy as np
 import onnxruntime
 from PIL import Image

-from watermark_verify.inference.yolox import compute_ciou
 from watermark_verify.tools import parse_qrcode_label_file
 from watermark_verify.utils.utils_bbox import DecodeBox

+def compute_ciou(box1, box2):
+    """计算CIoU,假设box格式为[x1, y1, x2, y2]"""
+    x1, y1, x2, y2 = box1
+    x1g, y1g, x2g, y2g = box2
+
+    # 求交集面积
+    xi1, yi1 = max(x1, x1g), max(y1, y1g)
+    xi2, yi2 = min(x2, x2g), min(y2, y2g)
+    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)
+
+    # 求各自面积
+    box_area = (x2 - x1) * (y2 - y1)
+    boxg_area = (x2g - x1g) * (y2g - y1g)
+
+    # 求并集面积
+    union_area = box_area + boxg_area - inter_area
+
+    # 求IoU
+    iou = inter_area / union_area
+
+    # 求CIoU额外项
+    cw = max(x2, x2g) - min(x1, x1g)
+    ch = max(y2, y2g) - min(y1, y1g)
+    c2 = cw ** 2 + ch ** 2
+    rho2 = ((x1 + x2 - x1g - x2g) ** 2 + (y1 + y2 - y1g - y2g) ** 2) / 4
+
+    ciou = iou - (rho2 / c2)
+    return ciou
+
 
 
 # ---------------------------------------------------------#
 #   将图像转换成RGB图像,防止灰度图在预测时报错。

+ 0 - 129
watermark_verify/inference/ssd.py

@@ -1,129 +0,0 @@
-import numpy as np
-import onnxruntime
-from PIL import Image
-
-from watermark_verify.inference.yolox import compute_ciou
-from watermark_verify.tools import parse_qrcode_label_file
-from watermark_verify.utils.anchors import get_anchors
-from watermark_verify.utils.utils_bbox import BBoxUtility
-
-
-# ---------------------------------------------------------#
-#   将图像转换成RGB图像,防止灰度图在预测时报错。
-#   代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB
-# ---------------------------------------------------------#
-def cvtColor(image):
-    if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:
-        return image
-    else:
-        image = image.convert('RGB')
-        return image
-
-
-# ---------------------------------------------------#
-#   对输入图像进行resize
-# ---------------------------------------------------#
-def resize_image(image, size, letterbox_image):
-    iw, ih = image.size
-    w, h = size
-    if letterbox_image:
-        scale = min(w / iw, h / ih)
-        nw = int(iw * scale)
-        nh = int(ih * scale)
-
-        image = image.resize((nw, nh), Image.BICUBIC)
-        new_image = Image.new('RGB', size, (128, 128, 128))
-        new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
-    else:
-        new_image = image.resize((w, h), Image.BICUBIC)
-    return new_image
-
-
-# ---------------------------------------------------#
-#   获得学习率
-# ---------------------------------------------------#
-def preprocess_input(inputs):
-    MEANS = (104, 117, 123)
-    return inputs - MEANS
-
-
-# ---------------------------------------------------#
-#   处理输入图像
-# ---------------------------------------------------#
-def deal_img(img_path, resized_size):
-    image = Image.open(img_path)
-    image_shape = np.array(np.shape(image)[0:2])
-    # ---------------------------------------------------------#
-    #   在这里将图像转换成RGB图像,防止灰度图在预测时报错。
-    #   代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB
-    # ---------------------------------------------------------#
-    image = cvtColor(image)
-    image_data = resize_image(image, resized_size, False)
-    image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
-    image_data = image_data.astype('float32')
-    return image_data, image_shape
-
-
-# ---------------------------------------------------#
-#   检测图像水印
-# ---------------------------------------------------#
-def detect_watermark(results, watermark_box, threshold=0.5):
-    # 解析输出结果
-    if len(results[0]) == 0:
-        return False
-    top_label = np.array(results[0][:, 4], dtype='int32')
-    top_conf = results[0][:, 5]
-    top_boxes = results[0][:, :4]
-    for box, score, cls in zip(top_boxes, top_conf, top_label):
-        wm_box_coords = watermark_box[:4]
-        wm_cls = watermark_box[4]
-        if cls == wm_cls:
-            ciou = compute_ciou(box, wm_box_coords)
-            if ciou > threshold:
-                return True
-    return False
-
-
-def predict_and_detect(image_path, model_file, watermark_txt, input_shape) -> bool:
-    """
-    使用指定onnx文件进行预测并进行黑盒水印检测
-    :param image_path: 输入图像路径
-    :param model_file: 模型文件路径
-    :param watermark_txt: 水印标签文件路径
-    :param input_shape: 模型输入图像大小,tuple
-    :return:
-    """
-    image_data, image_shape = deal_img(image_path, input_shape)
-    # 解析标签嵌入位置
-    parse_label = parse_qrcode_label_file.load_watermark_info(watermark_txt, image_path)
-    if len(parse_label) < 5:
-        return False
-    x_center, y_center, w, h, cls = parse_label
-
-    # 计算绝对坐标
-    height, width = image_shape
-    x1 = (x_center - w / 2) * width
-    y1 = (y_center - h / 2) * height
-    x2 = (x_center + w / 2) * width
-    y2 = (y_center + h / 2) * height
-    watermark_box = [x1, y1, x2, y2, cls]
-    if len(watermark_box) == 0:
-        return False
-    # 使用onnx进行推理
-    session = onnxruntime.InferenceSession(model_file)
-    ort_inputs = {session.get_inputs()[0].name: image_data}
-    output = session.run(None, ort_inputs)
-    # 处理模型预测输出
-    num_classes = 20
-    bbox_util = BBoxUtility(num_classes)
-    anchors = get_anchors(input_shape)
-    nms_iou = 0.45
-    confidence = 0.5
-    results = bbox_util.decode_box(output, anchors, image_shape, input_shape, False, nms_iou=nms_iou,
-                                   confidence=confidence)
-
-    if results is not None:
-        detect_result = detect_watermark(results, watermark_box)
-        return detect_result
-    else:
-        return False

+ 0 - 218
watermark_verify/inference/yolox.py

@@ -1,218 +0,0 @@
-import cv2
-import numpy as np
-import onnxruntime
-
-from watermark_verify.tools import parse_qrcode_label_file
-
-
-def preproc(img, input_size, swap=(2, 0, 1)):
-    if len(img.shape) == 3:
-        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
-    else:
-        padded_img = np.ones(input_size, dtype=np.uint8) * 114
-
-    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
-    resized_img = cv2.resize(
-        img,
-        (int(img.shape[1] * r), int(img.shape[0] * r)),
-        interpolation=cv2.INTER_LINEAR,
-    ).astype(np.uint8)
-    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
-
-    padded_img = padded_img.transpose(swap)
-    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
-    return padded_img, r
-
-
-def demo_postprocess(outputs, img_size, p6=False):
-    grids = []
-    expanded_strides = []
-    strides = [8, 16, 32] if not p6 else [8, 16, 32, 64]
-
-    hsizes = [img_size[0] // stride for stride in strides]
-    wsizes = [img_size[1] // stride for stride in strides]
-
-    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
-        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
-        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
-        grids.append(grid)
-        shape = grid.shape[:2]
-        expanded_strides.append(np.full((*shape, 1), stride))
-
-    grids = np.concatenate(grids, 1)
-    expanded_strides = np.concatenate(expanded_strides, 1)
-    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
-    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
-
-    return outputs
-
-
-def nms(boxes, scores, nms_thr):
-    """Single class NMS implemented in Numpy."""
-    x1 = boxes[:, 0]
-    y1 = boxes[:, 1]
-    x2 = boxes[:, 2]
-    y2 = boxes[:, 3]
-
-    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
-    order = scores.argsort()[::-1]
-
-    keep = []
-    while order.size > 0:
-        i = order[0]
-        keep.append(i)
-        xx1 = np.maximum(x1[i], x1[order[1:]])
-        yy1 = np.maximum(y1[i], y1[order[1:]])
-        xx2 = np.minimum(x2[i], x2[order[1:]])
-        yy2 = np.minimum(y2[i], y2[order[1:]])
-
-        w = np.maximum(0.0, xx2 - xx1 + 1)
-        h = np.maximum(0.0, yy2 - yy1 + 1)
-        inter = w * h
-        ovr = inter / (areas[i] + areas[order[1:]] - inter)
-
-        inds = np.where(ovr <= nms_thr)[0]
-        order = order[inds + 1]
-
-    return keep
-
-
-def multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr):
-    """Multiclass NMS implemented in Numpy. Class-agnostic version."""
-    cls_inds = scores.argmax(1)
-    cls_scores = scores[np.arange(len(cls_inds)), cls_inds]
-
-    valid_score_mask = cls_scores > score_thr
-    if valid_score_mask.sum() == 0:
-        return None
-    valid_scores = cls_scores[valid_score_mask]
-    valid_boxes = boxes[valid_score_mask]
-    valid_cls_inds = cls_inds[valid_score_mask]
-    keep = nms(valid_boxes, valid_scores, nms_thr)
-    if keep:
-        dets = np.concatenate(
-            [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
-        )
-    return dets
-
-
-def multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr):
-    """Multiclass NMS implemented in Numpy. Class-aware version."""
-    final_dets = []
-    num_classes = scores.shape[1]
-    for cls_ind in range(num_classes):
-        cls_scores = scores[:, cls_ind]
-        valid_score_mask = cls_scores > score_thr
-        if valid_score_mask.sum() == 0:
-            continue
-        else:
-            valid_scores = cls_scores[valid_score_mask]
-            valid_boxes = boxes[valid_score_mask]
-            keep = nms(valid_boxes, valid_scores, nms_thr)
-            if len(keep) > 0:
-                cls_inds = np.ones((len(keep), 1)) * cls_ind
-                dets = np.concatenate(
-                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
-                )
-                final_dets.append(dets)
-    if len(final_dets) == 0:
-        return None
-    return np.concatenate(final_dets, 0)
-
-
-def multiclass_nms(boxes, scores, nms_thr, score_thr, class_agnostic=True):
-    """Multiclass NMS implemented in Numpy"""
-    if class_agnostic:
-        nms_method = multiclass_nms_class_agnostic
-    else:
-        nms_method = multiclass_nms_class_aware
-    return nms_method(boxes, scores, nms_thr, score_thr)
-
-
-def compute_ciou(box1, box2):
-    """计算CIoU,假设box格式为[x1, y1, x2, y2]"""
-    x1, y1, x2, y2 = box1
-    x1g, y1g, x2g, y2g = box2
-
-    # 求交集面积
-    xi1, yi1 = max(x1, x1g), max(y1, y1g)
-    xi2, yi2 = min(x2, x2g), min(y2, y2g)
-    inter_area = max(0, xi2 - xi1) * max(0, yi2 - yi1)
-
-    # 求各自面积
-    box_area = (x2 - x1) * (y2 - y1)
-    boxg_area = (x2g - x1g) * (y2g - y1g)
-
-    # 求并集面积
-    union_area = box_area + boxg_area - inter_area
-
-    # 求IoU
-    iou = inter_area / union_area
-
-    # 求CIoU额外项
-    cw = max(x2, x2g) - min(x1, x1g)
-    ch = max(y2, y2g) - min(y1, y1g)
-    c2 = cw ** 2 + ch ** 2
-    rho2 = ((x1 + x2 - x1g - x2g) ** 2 + (y1 + y2 - y1g - y2g) ** 2) / 4
-
-    ciou = iou - (rho2 / c2)
-    return ciou
-
-
-def detect_watermark(dets, watermark_box, threshold=0.5):
-    for box, score, cls in zip(dets[:, :4], dets[:, 4], dets[:, 5]):
-        wm_box_coords = watermark_box[:4]
-        wm_cls = watermark_box[4]
-        if cls == wm_cls:
-            ciou = compute_ciou(box, wm_box_coords)
-            if ciou > threshold:
-                return True
-    return False
-
-
-def predict_and_detect(image_path, model_file, watermark_txt, input_shape) -> bool:
-    """
-    使用指定onnx文件进行预测并进行黑盒水印检测
-    :param image_path: 输入图像路径
-    :param model_file: 模型文件路径
-    :param watermark_txt: 水印标签文件路径
-    :param input_shape: 模型输入图像大小,tuple
-    :return:
-    """
-    origin_img = cv2.imread(image_path)
-    img, ratio = preproc(origin_img, input_shape)
-    height, width, channels = origin_img.shape
-    x_center, y_center, w, h, cls = parse_qrcode_label_file.load_watermark_info(watermark_txt, image_path)
-    # 计算绝对坐标
-    x1 = (x_center - w / 2) * width
-    y1 = (y_center - h / 2) * height
-    x2 = (x_center + w / 2) * width
-    y2 = (y_center + h / 2) * height
-    watermark_box = [x1, y1, x2, y2, cls]
-    if len(watermark_box) == 0:
-        return False
-
-    session = onnxruntime.InferenceSession(model_file)
-
-    ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
-    output = session.run(None, ort_inputs)
-    predictions = demo_postprocess(output[0], input_shape)[0]
-
-    boxes = predictions[:, :4]
-    scores = predictions[:, 4:5] * predictions[:, 5:]
-
-    boxes_xyxy = np.ones_like(boxes)
-    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
-    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
-    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
-    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
-    boxes_xyxy /= ratio
-    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
-    # dets = np.vstack((dets, [386.99999999999994, 41.99999999999999, 449.0, 104.0, 1, 0]))
-    # dets = np.vstack((dets, [326.0, 182.0, 388.0, 244.00000000000003, 1, 1]))
-    # dets = np.vstack((dets, [403.0, 195.0, 465.0, 257.0, 1, 2]))
-    if dets is not None:
-        detect_result = detect_watermark(dets, watermark_box)
-        return detect_result
-    else:
-        return False

+ 5 - 2
watermark_verify/verify_tool.py

@@ -1,6 +1,6 @@
 import os

-from watermark_verify.inference import ssd
+from watermark_verify.inference import rcnn
 from watermark_verify import logger
 from watermark_verify.tools import secret_label_func, qrcode_tool, general_tool, parse_qrcode_label_file

@@ -46,7 +46,10 @@ def label_verification(model_filename: str) -> bool:
     for cls, images in cls_image_mapping.items():
         for image in images:
             image_path = os.path.join(trigger_dir, image)
-            detect_result = ssd.predict_and_detect(image_path, model_filename, qrcode_positions_file, (300, 300))
+            try:
+                detect_result = rcnn.predict_and_detect(image_path, model_filename, qrcode_positions_file, (600, 600))
+            except Exception as e:
+                continue
             if detect_result:
                 accessed_cls.add(cls)
                 break