# YOLOv5 general utils

import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def isdocker():
    # Is environment a Docker container
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
        return True
    except OSError:
        return False


def check_git_status():
    # Recommend 'git pull' if code is out of date
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not isdocker(), 'skipping check (Docker image)'
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'
        url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git')  # github repo url
        branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
        n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
        if n > 0:
            s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
                f"Use 'git pull' to update or 'git clone {url}' to download latest."
        else:
            s = f'up to date with {url} ✅'
        print(emojis(s))  # emoji-safe
    except Exception as e:
        print(e)


def check_requirements(requirements='requirements.txt', exclude=()):
    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
    import pkg_resources as pkg
    prefix = colorstr('red', 'bold', 'requirements:')
    if isinstance(requirements, (str, Path)):  # requirements.txt file
        file = Path(requirements)
        if not file.exists():
            print(f"{prefix} {file.resolve()} not found, check failed.")
            return
        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
    else:  # list or tuple of packages
        requirements = [x for x in requirements if x not in exclude]

    n = 0  # number of packages updated
    for r in requirements:
        try:
            pkg.require(r)
        except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
            n += 1
            print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...")
            print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())

    if n:  # if packages updated
        source = file.resolve() if 'file' in locals() else requirements
        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
        print(emojis(s))  # emoji-safe
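
# Usage sketch (illustrative; the package names and exclude tuple below are assumptions, not part
# of this module):
#   check_requirements()                                      # verify everything in requirements.txt
#   check_requirements(['numpy>=1.18.5', 'PyYAML>=5.3.1'])    # or pass an explicit list of packages
#   check_requirements(exclude=('pycocotools',))              # skip packages installed separately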


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
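
# Usage sketch (illustrative values): check_img_size() rounds a requested --img-size up to the
# nearest multiple of the model stride via make_divisible() (defined below).
#   check_img_size(640, s=32)  # -> 640, already a multiple of 32
#   check_img_size(641, s=32)  # prints a WARNING and returns 672 (= 21 * 32)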


def check_imshow():
    # Check if environment supports image displays
    try:
        assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False


def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file


def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
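
# Usage sketch (illustrative; 'optimizer' and 'epochs' are assumed caller-side names): the
# returned lambda is typically handed to a LambdaLR scheduler for a cosine learning-rate ramp.
#   lf = one_cycle(1, 0.2, epochs)  # cosine ramp 1.0 -> 0.2 over 'epochs' steps
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
#   lf(0) == 1.0, lf(epochs) == 0.2, lf(epochs / 2) == 0.6  # midpoint of the cosine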


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
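
# Usage sketch (illustrative): any number of color/style names may precede the string; with a
# single argument the default 'blue', 'bold' styling is applied.
#   colorstr('hello world')                   # bold blue 'hello world'
#   colorstr('red', 'bold', 'requirements:')  # bold red prefix, as used in check_requirements()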


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights
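
# Usage sketch (illustrative; 'dataset' and 'nc' are assumed training-loop names): rarer classes
# receive larger class weights, so images containing them are sampled more often.
#   cw = labels_to_class_weights(dataset.labels, nc)  # inverse-frequency class weights (torch tensor)
#   iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw.numpy())
#   dataset.indices = random.choices(range(len(dataset)), weights=iw, k=len(dataset))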


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
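
# Usage sketch (illustrative values): both converters accept torch Tensors or numpy arrays of
# shape (n, 4) and are exact inverses of one another.
#   b = np.array([[10., 20., 50., 80.]])     # one xyxy box
#   xyxy2xywh(b)                             # -> [[30., 50., 40., 60.]], i.e. (cx, cy, w, h)
#   np.allclose(xywh2xyxy(xyxy2xywh(b)), b)  # -> True, the round trip preserves the box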


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y
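
# Usage sketch (illustrative values): a normalized label [cx, cy, w, h] scaled to pixel xyxy for a
# 640x640 image with no letterbox padding.
#   xywhn2xyxy(np.array([[0.5, 0.5, 0.25, 0.5]]), w=640, h=640)  # -> [[240., 160., 400., 480.]]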


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * x[:, 0] + padw  # top left x
    y[:, 1] = h * x[:, 1] + padh  # top left y
    return y


def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y, = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy


def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
    return xyxy2xywh(np.array(boxes))  # cls, xywh


def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy
    return segments


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
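
# Usage sketch (illustrative; 'img', 'det' and 'im0' are assumed inference-loop names): maps boxes
# predicted on the letterboxed network input back onto the original image.
#   det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()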


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU
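
# Usage sketch (illustrative tensors): element-wise IoU between one box and a set of boxes; the
# CIoU variant additionally penalizes center distance and aspect-ratio mismatch.
#   b1 = torch.tensor([0., 0., 10., 10.])  # single xyxy box
#   b2 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   bbox_iou(b1, b2)             # -> approx. tensor([1.00, 0.14]), plain IoU
#   bbox_iou(b1, b2, CIoU=True)  # -> CIoU, as used for box regression losses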


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
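
# Usage sketch (illustrative tensors): unlike bbox_iou(), box_iou() is pairwise and returns an
# N x M matrix, which is what the merge-NMS branch of non_max_suppression() relies on.
#   a = torch.tensor([[0., 0., 10., 10.]])
#   b = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
#   box_iou(a, b)  # -> tensor([[1., 0.]]) with shape (1, 2)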


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
         list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output
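
# Usage sketch (illustrative; 'model' and 'img' are assumed inference-loop names): the raw model
# output of shape (batch, anchors, 5 + nc) is reduced to one (n, 6) tensor per image.
#   pred = model(img)[0]
#   pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False)
#   for det in pred:  # det columns: x1, y1, x2, y2, confidence, class
#       ...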


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload


def apply_classifier(x, model, img, im0):
    # Apply a second-stage classifier to YOLO outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path