From 53170f36c1d7e39d8fc32adefaaf2375a58aa330 Mon Sep 17 00:00:00 2001 From: Giannis Pastaltzidis Date: Mon, 24 Oct 2022 00:04:17 +0300 Subject: [PATCH 1/4] [Add]: TensorRT Support --- SimpleHigherHRNet.py | 158 ++- export.py | 623 +++++++++ misc/utils.py | 116 +- scripts/live-demo.py | 9 +- utils/__init__.py | 71 + utils/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 2301 bytes .../__pycache__/augmentations.cpython-38.pyc | Bin 0 -> 13747 bytes utils/__pycache__/autoanchor.cpython-38.pyc | Bin 0 -> 6508 bytes utils/__pycache__/dataloaders.cpython-38.pyc | Bin 0 -> 41659 bytes utils/__pycache__/downloads.cpython-38.pyc | Bin 0 -> 5324 bytes utils/__pycache__/general.cpython-38.pyc | Bin 0 -> 37920 bytes utils/__pycache__/metrics.cpython-38.pyc | Bin 0 -> 11600 bytes utils/__pycache__/plots.cpython-38.pyc | Bin 0 -> 21672 bytes utils/__pycache__/torch_utils.cpython-38.pyc | Bin 0 -> 16799 bytes utils/activations.py | 103 ++ utils/augmentations.py | 399 ++++++ utils/autoanchor.py | 169 +++ utils/autobatch.py | 72 + utils/aws/__init__.py | 0 utils/aws/mime.sh | 26 + utils/aws/resume.py | 40 + utils/aws/userdata.sh | 27 + utils/callbacks.py | 76 ++ utils/dataloaders.py | 1181 +++++++++++++++++ utils/docker/Dockerfile | 65 + utils/docker/Dockerfile-arm64 | 41 + utils/docker/Dockerfile-cpu | 40 + utils/downloads.py | 189 +++ utils/flask_rest_api/README.md | 73 + utils/flask_rest_api/example_request.py | 19 + utils/flask_rest_api/restapi.py | 48 + utils/general.py | 1083 +++++++++++++++ utils/google_app_engine/Dockerfile | 25 + .../additional_requirements.txt | 4 + utils/google_app_engine/app.yaml | 14 + utils/loggers/__init__.py | 404 ++++++ utils/loggers/clearml/README.md | 222 ++++ utils/loggers/clearml/__init__.py | 0 utils/loggers/clearml/clearml_utils.py | 157 +++ utils/loggers/clearml/hpo.py | 84 ++ utils/loggers/comet/README.md | 256 ++++ utils/loggers/comet/__init__.py | 508 +++++++ utils/loggers/comet/comet_utils.py | 150 +++ utils/loggers/comet/hpo.py | 118 ++ 
utils/loggers/comet/optimizer_config.json | 209 +++ utils/loggers/wandb/README.md | 162 +++ utils/loggers/wandb/__init__.py | 0 utils/loggers/wandb/log_dataset.py | 27 + utils/loggers/wandb/sweep.py | 41 + utils/loggers/wandb/sweep.yaml | 143 ++ utils/loggers/wandb/wandb_utils.py | 589 ++++++++ utils/loss.py | 234 ++++ utils/metrics.py | 368 +++++ utils/plots.py | 575 ++++++++ utils/segment/__init__.py | 0 .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 173 bytes .../__pycache__/general.cpython-38.pyc | Bin 0 -> 4575 bytes utils/segment/augmentations.py | 104 ++ utils/segment/dataloaders.py | 330 +++++ utils/segment/general.py | 134 ++ utils/segment/loss.py | 186 +++ utils/segment/metrics.py | 210 +++ utils/segment/plots.py | 143 ++ utils/torch_utils.py | 430 ++++++ utils/triton.py | 85 ++ 65 files changed, 10526 insertions(+), 14 deletions(-) create mode 100644 export.py create mode 100644 utils/__init__.py create mode 100644 utils/__pycache__/__init__.cpython-38.pyc create mode 100644 utils/__pycache__/augmentations.cpython-38.pyc create mode 100644 utils/__pycache__/autoanchor.cpython-38.pyc create mode 100644 utils/__pycache__/dataloaders.cpython-38.pyc create mode 100644 utils/__pycache__/downloads.cpython-38.pyc create mode 100644 utils/__pycache__/general.cpython-38.pyc create mode 100644 utils/__pycache__/metrics.cpython-38.pyc create mode 100644 utils/__pycache__/plots.cpython-38.pyc create mode 100644 utils/__pycache__/torch_utils.cpython-38.pyc create mode 100644 utils/activations.py create mode 100644 utils/augmentations.py create mode 100644 utils/autoanchor.py create mode 100644 utils/autobatch.py create mode 100644 utils/aws/__init__.py create mode 100644 utils/aws/mime.sh create mode 100644 utils/aws/resume.py create mode 100644 utils/aws/userdata.sh create mode 100644 utils/callbacks.py create mode 100644 utils/dataloaders.py create mode 100644 utils/docker/Dockerfile create mode 100644 utils/docker/Dockerfile-arm64 create mode 100644 
utils/docker/Dockerfile-cpu create mode 100644 utils/downloads.py create mode 100644 utils/flask_rest_api/README.md create mode 100644 utils/flask_rest_api/example_request.py create mode 100644 utils/flask_rest_api/restapi.py create mode 100644 utils/general.py create mode 100644 utils/google_app_engine/Dockerfile create mode 100644 utils/google_app_engine/additional_requirements.txt create mode 100644 utils/google_app_engine/app.yaml create mode 100644 utils/loggers/__init__.py create mode 100644 utils/loggers/clearml/README.md create mode 100644 utils/loggers/clearml/__init__.py create mode 100644 utils/loggers/clearml/clearml_utils.py create mode 100644 utils/loggers/clearml/hpo.py create mode 100644 utils/loggers/comet/README.md create mode 100644 utils/loggers/comet/__init__.py create mode 100644 utils/loggers/comet/comet_utils.py create mode 100644 utils/loggers/comet/hpo.py create mode 100644 utils/loggers/comet/optimizer_config.json create mode 100644 utils/loggers/wandb/README.md create mode 100644 utils/loggers/wandb/__init__.py create mode 100644 utils/loggers/wandb/log_dataset.py create mode 100644 utils/loggers/wandb/sweep.py create mode 100644 utils/loggers/wandb/sweep.yaml create mode 100644 utils/loggers/wandb/wandb_utils.py create mode 100644 utils/loss.py create mode 100644 utils/metrics.py create mode 100644 utils/plots.py create mode 100644 utils/segment/__init__.py create mode 100644 utils/segment/__pycache__/__init__.cpython-38.pyc create mode 100644 utils/segment/__pycache__/general.cpython-38.pyc create mode 100644 utils/segment/augmentations.py create mode 100644 utils/segment/dataloaders.py create mode 100644 utils/segment/general.py create mode 100644 utils/segment/loss.py create mode 100644 utils/segment/metrics.py create mode 100644 utils/segment/plots.py create mode 100644 utils/torch_utils.py create mode 100644 utils/triton.py diff --git a/SimpleHigherHRNet.py b/SimpleHigherHRNet.py index 3dda437..a11a2ec 100644 --- 
a/SimpleHigherHRNet.py +++ b/SimpleHigherHRNet.py @@ -4,12 +4,88 @@ import numpy as np import torch from torchvision.transforms import transforms - +import tensorrt as trt from models.higherhrnet import HigherHRNet from misc.HeatmapParser import HeatmapParser -from misc.utils import get_multi_scale_size, resize_align_multi_scale, get_multi_stage_outputs, aggregate_results, get_final_preds, bbox_iou - - +from misc.utils import get_multi_scale_size, resize_align_multi_scale, get_multi_stage_outputs, aggregate_results, get_final_preds, bbox_iou,TRTModule_hrnet +from collections import OrderedDict,namedtuple +# from cuda import cuda, nvrtc + +# class HostDeviceMem(object): +# def __init__(self, host_mem, device_mem): +# self.host = host_mem +# self.device = device_mem + +# def __str__(self): +# return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device) + +# def __repr__(self): +# return self.__str__() +# class TrtModel: + +# def __init__(self,engine_path,max_batch_size=1,dtype=np.float32): + +# self.engine_path = engine_path +# self.dtype = dtype +# self.logger = trt.Logger(trt.Logger.WARNING) +# self.runtime = trt.Runtime(self.logger) +# self.engine = self.load_engine(self.runtime, self.engine_path) +# self.max_batch_size = max_batch_size +# self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers() +# self.context = self.engine.create_execution_context() + + + +# @staticmethod +# def load_engine(trt_runtime, engine_path): +# trt.init_libnvinfer_plugins(None, "") +# with open(engine_path, 'rb') as f: +# engine_data = f.read() +# engine = trt_runtime.deserialize_cuda_engine(engine_data) +# return engine + +# def allocate_buffers(self): + +# inputs = [] +# outputs = [] +# bindings = [] +# # stream = cuda.Stream() +# err, stream = cuda.cuStreamCreate(0) + +# for binding in self.engine: +# size = trt.volume(self.engine.get_binding_shape(binding)) * self.max_batch_size +# err, dXclass = cuda.cuMemAlloc(size) +# err, dYclass = 
cuda.cuMemAlloc(size) +# err, dOutclass = cuda.cuMemAlloc(size) +# host_mem = cuda.pagelocked_empty(size, self.dtype) +# device_mem = cuda.mem_alloc(host_mem.nbytes) + +# bindings.append(int(device_mem)) + +# if self.engine.binding_is_input(binding): +# inputs.append(HostDeviceMem(host_mem, device_mem)) +# else: +# outputs.append(HostDeviceMem(host_mem, device_mem)) + +# return inputs, outputs, bindings, stream + + +# def __call__(self,x:np.ndarray,batch_size=2): + +# x = x.astype(self.dtype) + +# np.copyto(self.inputs[0].host,x.ravel()) + +# for inp in self.inputs: +# cuda.memcpy_htod_async(inp.device, inp.host, self.stream) + +# self.context.execute_async(batch_size=batch_size, bindings=self.bindings, stream_handle=self.stream.handle) +# for out in self.outputs: +# cuda.memcpy_dtoh_async(out.host, out.device, self.stream) + + +# self.stream.synchronize() +# return [out.host.reshape(batch_size,-1) for out in self.outputs] class SimpleHigherHRNet: """ SimpleHigherHRNet class. @@ -30,7 +106,8 @@ def __init__(self, filter_redundant_poses=True, max_nof_people=30, max_batch_size=32, - device=torch.device("cpu")): + device=torch.device("cpu"), + trt_=True): """ Initializes a new SimpleHigherHRNet object. HigherHRNet is initialized on the torch.device("device") and @@ -74,6 +151,7 @@ def __init__(self, self.max_nof_people = max_nof_people self.max_batch_size = max_batch_size self.device = device + self.trt_=trt_ # assert nof_joints in (14, 15, 17) if self.nof_joints == 14: @@ -96,7 +174,36 @@ def __init__(self, # fix issue with official high-resolution weights checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' 
else k, v) for k, v in checkpoint.items()]) self.model.load_state_dict(checkpoint) - + # if True: + # import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + # # check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + # if device.type == 'cpu': + # device = torch.device('cuda:0') + # Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + # # logger = trt.Logger(trt.Logger.INFO) + # with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + # model = runtime.deserialize_cuda_engine(f.read()) + # context = model.create_execution_context() + # bindings = OrderedDict() + # output_names = [] + # fp16 = False # default updated below + # dynamic = False + # for i in range(model.num_bindings): + # name = model.get_binding_name(i) + # dtype = trt.nptype(model.get_binding_dtype(i)) + # if model.binding_is_input(i): + # if -1 in tuple(model.get_binding_shape(i)): # dynamic + # dynamic = True + # context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) + # if dtype == np.float16: + # fp16 = True + # else: # output + # output_names.append(name) + # shape = tuple(context.get_binding_shape(i)) + # im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + # bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + # binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + # batch_size = bindings['images'].shape[0] if 'cuda' in str(self.device): print("device: 'cuda' - ", end="") @@ -114,10 +221,40 @@ def __init__(self, print("device: 'cpu'") else: raise ValueError('Wrong device name.') - - self.model = self.model.to(device) - self.model.eval() - + if not trt_: + self.model = self.model.to(device) + self.model.eval() + else: + # import pycuda.driver as cuda + # self.model = TrtModel('pose_higher_hrnet_w32_512.engine') + if device.type == 'cpu': + device = torch.device('cuda:0') + self.model=TRTModule_hrnet(path='pose_higher_hrnet_w32_512.engine',device=self.device) + 
# Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + # logger = trt.Logger(trt.Logger.INFO) + # with open('pose_higher_hrnet_w32_512.engine', 'rb') as f, trt.Runtime(logger) as runtime: + # self.model = runtime.deserialize_cuda_engine(f.read()) + # self.context = self.model.create_execution_context() + # self.bindings = OrderedDict() + # self.output_names = [] + # fp16 = False # default updated below + # dynamic = False + # for i in range(self.model.num_bindings): + # name = self.model.get_binding_name(i) + # dtype = trt.nptype(self.model.get_binding_dtype(i)) + # if self.model.binding_is_input(i): + # if -1 in tuple(self.model.get_binding_shape(i)): # dynamic + # dynamic = True + # self.context.set_binding_shape(i, tuple(self.model.get_profile_shape(0, i)[2])) + # if dtype == np.float16: + # fp16 = True + # else: # output + # self.output_names.append(name) + # shape = tuple(self.context.get_binding_shape(i)) + # im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + # self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + # self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) + # self.batch_size = self.bindings['images'].shape[0] self.output_parser = HeatmapParser(num_joints=self.nof_joints, joint_set=self.joint_set, max_num_people=self.max_nof_people, @@ -201,6 +338,7 @@ def _predict_batch(self, image): image = image.to(self.device) images.append(image) images = torch.cat(images) + # images=images # inference # output: list of HigherHRNet outputs (heatmaps) diff --git a/export.py b/export.py new file mode 100644 index 0000000..4e07bc5 --- /dev/null +++ b/export.py @@ -0,0 +1,623 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ +PaddlePaddle | `paddle` | yolov5s_paddle_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + +Usage: + $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... + +Inference: + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle + +TensorFlow.js: + $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model + $ npm start +""" + +import argparse +import json +import os +import platform +import re +import subprocess +import sys +import time +import warnings +from pathlib import Path + +import pandas as pd +import torch +from torch.utils.mobile_optimizer import optimize_for_mobile + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +# from models.experimental import attempt_load +# from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel +from utils.dataloaders import LoadImages +from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, + check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) +from utils.torch_utils import select_device, smart_inference_mode +from models.higherhrnet import HigherHRNet + +MACOS = platform.system() == 'Darwin' # macOS environment + + +def export_formats(): + # YOLOv5 export formats + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['CoreML', 'coreml', '.mlmodel', True, False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], + ['TensorFlow GraphDef', 'pb', '.pb', True, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], + ['TensorFlow.js', 'tfjs', '_web_model', False, False], + ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] + return 
pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def try_export(inner_func): + # YOLOv5 export decorator, i..e @try_export + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + +@try_export +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript') + + ts = torch.jit.trace(model, im, strict=False) + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + return f, None + + +@try_export +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export + check_requirements('onnx') + import onnx + + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + + output_names = ['output0', 'output1'] #if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + 
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + + torch.onnx.export( + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, + f, + verbose=False, + opset_version=opset, + do_constant_folding=True, + input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic or None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + + # Metadata + d = {'stride': int(max(model.stride)), 'names': model.names} + for k, v in d.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + onnx.save(model_onnx, f) + + # Simplify + if simplify: + try: + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + return f, model_onnx + + +@try_export +def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', f'_openvino_model{os.sep}') + + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" + subprocess.run(cmd.split(), check=True, env=os.environ) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): + # YOLOv5 Paddle export + 
check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle + from x2paddle.convert import pytorch2paddle + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(file).replace('.pt', f'_paddle_model{os.sep}') + + pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): + # YOLOv5 CoreML export + check_requirements('coremltools') + import coremltools as ct + + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + + ts = torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) + if bits < 32: + if MACOS: # quantization only supported on macOS + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + else: + print(f'{prefix} quantization only supported on macOS, skipping...') + ct_model.save(f) + return f, ct_model + + +@try_export +def export_engine(model, im, file, half, dynamic, simplify, workspace=2, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' + try: + import tensorrt as trt + except Exception: + if platform.system() == 'Linux': + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt + + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + # grid = model.model[-1].anchor_grid + # model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + # export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + # model.model[-1].anchor_grid = grid + export_onnx(model, im, file, 12, dynamic, simplify) + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + for inp in inputs: + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') + + if dynamic: + if im.shape[0] <= 1: 
+ LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') + if builder.platform_has_fast_fp16 : + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + return f, None + + +@try_export +def export_saved_model(model, + im, + file, + dynamic, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + keras=False, + prefix=colorstr('TensorFlow SavedModel:')): + # YOLOv5 TensorFlow SavedModel export + try: + import tensorflow as tf + except Exception: + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + from models.tf import TFModel + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW + + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow + _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False + keras_model.summary() + if keras: + 
keras_model.save(f, save_format='tf') + else: + spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(spec) + frozen_func = convert_variables_to_constants_v2(m) + tfm = tf.Module() + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) + tfm.__call__(im) + tf.saved_model.save(tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( + tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + return f, keras_model + + +@try_export +def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + return f, None + + +@try_export +def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + # YOLOv5 TensorFlow Lite export + import tensorflow as tf + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = str(file).replace('.pt', '-fp16.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] + 
converter.optimizations = [tf.lite.Optimize.DEFAULT] + if int8: + from models.tf import representative_dataset_gen + dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = True + f = str(file).replace('.pt', '-int8.tflite') + if nms or agnostic_nms: + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + + tflite_model = converter.convert() + open(f, "wb").write(tflite_model) + return f, None + + +@try_export +def export_edgetpu(file, prefix=colorstr('Edge TPU:')): + # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' + if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" + subprocess.run(cmd.split(), check=True) + return f, None + + +@try_export +def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): + # YOLOv5 TensorFlow.js export + check_requirements('tensorflowjs') + import tensorflowjs as tfjs + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + f_json = f'{f}/model.json' # *.json path + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' + subprocess.run(cmd.split()) + + json = Path(f_json).read_text() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + 
r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', json) + j.write(subst) + return f, None + + +@smart_inference_mode() +def run( + data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' + weights=ROOT / 'pose_higher_hrnet_w32_512', # weights path + imgsz=(512, 512), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + keras=False, # use Keras + optimize=False, # TorchScript: optimize for mobile + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF/TensorRT: dynamic axes + simplify=False, # ONNX: simplify model + opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25, # TF.js NMS: confidence threshold +): + t = time.time() + include = [x.lower() for x in include] # to lowercase + fmts = tuple(export_formats()['Argument'][1:]) # --include arguments + flags = [x in include for x in fmts] + assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights + + # Load PyTorch model + device = select_device(device) + if half: + assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' + assert not dynamic, '--half not compatible with --dynamic, i.e. 
use either --half or --dynamic but not both' + # model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model + model = HigherHRNet(32,17) + model.load_state_dict(torch.load('pose_higher_hrnet_w32_512.pth')) + model.cuda() + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + if optimize: + assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu' + + # Input + # # gs = int(max(model.stride)) # grid size (max stride) + # imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples + im = torch.zeros(batch_size, 3, 512,960).to(device) # image size(1,3,320,192) BCHW iDetection + + # Update model + model.eval() + # for k, m in model.named_modules(): + # if isinstance(m, Detect): + # m.inplace = inplace + # m.dynamic = dynamic + # m.export = True + + for _ in range(2): + y = model(im) # dry runs + if half and not coreml: + im, model = im.half(), model.half() # to FP16 + # shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape + # print(y) + # shape = y.shape + # metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata + # LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") + + # Exports + f = [''] * len(fmts) # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + if jit: # TorchScript + f[0], _ = export_torchscript(model, im, file, optimize) + if engine: # TensorRT required before ONNX + f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) + if onnx or xml: # OpenVINO requires ONNX + f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) + if xml: # OpenVINO + f[3], _ = export_openvino(file, metadata, half) + if coreml: # CoreML + f[4], _ = export_coreml(model, im, file, int8, half) + if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow 
formats + assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' + assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' + f[5], s_model = export_saved_model(model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + iou_thres=iou_thres, + conf_thres=conf_thres, + keras=keras) + if pb or tfjs: # pb prerequisite to tfjs + f[6], _ = export_pb(s_model, file) + if tflite or edgetpu: + f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + if edgetpu: + f[8], _ = export_edgetpu(file) + if tfjs: + f[9], _ = export_tfjs(file) + if paddle: # PaddlePaddle + f[10], _ = export_paddle(model, im, file, metadata) + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type + dir = Path('segment' if seg else 'classify' if cls else '') + h = '--half' if half else '' # --half FP16 inference arg + s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ + "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" + f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" + f"\nVisualize: https://netron.app") + return f # return list of exported files/dirs + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 
'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'pose_higher_hrnet_w32_512.pt', help='model.pt path(s)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--keras', action='store_true', help='TF: use Keras') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=1, help='TensorRT: workspace size (GB)') + parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') + parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') + parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') + parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') + parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') + parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument( + '--include', + 
nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/misc/utils.py b/misc/utils.py index cab8945..d07acab 100644 --- a/misc/utils.py +++ b/misc/utils.py @@ -2,7 +2,9 @@ import munkres import numpy as np import torch - +from collections import OrderedDict,namedtuple +import tensorrt as trt +starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True) # solution proposed in https://github.com/pytorch/pytorch/issues/229#issuecomment-299424875 def flip_tensor(tensor, dim=0): @@ -370,7 +372,17 @@ def get_multi_stage_outputs(model, image, # but it could also be (no checkpoints with this configuration) # [(batch, nof_joints*2, height//4, width//4), (batch, nof_joints*2, height//2, width//2), (batch, nof_joints, height, width)] if len(image) <= max_batch_size: + print(image.size()) + # starter.record() + outputs = model(image) + + ender.record() + # WAIT FOR GPU SYNC + # torch.cuda.synchronize() + # curr_time = starter.elapsed_time(ender) + # print(curr_time) + else: outputs = [ torch.empty((image.shape[0], nof_joints * 2, image.shape[-2] // 4, image.shape[-1] // 4), @@ -593,3 +605,105 @@ def get_final_preds(grouped_joints, center, scale, heatmap_size): return final_results # # + +def torch_device_from_trt(device): + if device == trt.TensorLocation.DEVICE: + return torch.device("cuda") + elif device == trt.TensorLocation.HOST: + return torch.device("cpu") + else: + return TypeError("%s is not supported by torch" % device) +def torch_dtype_from_trt(dtype): + if dtype == trt.int8: + return torch.int8 + elif trt.__version__ >= '7.0' and dtype == trt.bool: + return torch.bool + elif dtype == 
trt.int32: + return torch.int32 + elif dtype == trt.float16: + return torch.float16 + elif dtype == trt.float32: + return torch.float32 + else: + raise TypeError("%s is not supported by torch" % dtype) +class TRTModule_hrnet(torch.nn.Module): + def __init__(self, engine=None, input_names=None, output_names=None, input_flattener=None, output_flattener=None,path=None,device=None): + super(TRTModule_hrnet, self).__init__() + # self._register_state_dict_hook(TRTModule._on_state_dict) + # self.engine = engine + logger = trt.Logger(trt.Logger.INFO) + with open(path, 'rb') as f, trt.Runtime(logger) as runtime: + self.engine = runtime.deserialize_cuda_engine(f.read()) + if self.engine is not None: + self.context = self.engine.create_execution_context() + self.input_names = ['images'] + self.output_names = [] + self.input_flattener = input_flattener + self.output_flattener = output_flattener + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + + # with open(path, 'rb') as f, trt.Runtime(logger) as runtime: + # self.model = runtime.deserialize_cuda_engine(f.read()) + # self.context = self.model.create_execution_context() + self.bindings = OrderedDict() + # self.output_names = [] + fp16 = False # default updated below + dynamic = False + for i in range(self.engine.num_bindings): + name = self.engine.get_binding_name(i) + dtype = trt.nptype(self.engine.get_binding_dtype(i)) + if self.engine.binding_is_input(i): + if -1 in tuple(self.engine.get_binding_shape(i)): # dynamic + dynamic = True + self.context.set_binding_shape(i, tuple(self.engine.get_profile_shape(0, i)[2])) + if dtype == np.float16: + fp16 = True + else: # output + self.output_names.append(name) + shape = tuple(self.context.get_binding_shape(i)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) + self.batch_size = 
self.bindings['images'].shape[0] + + + + def forward(self, *inputs): + bindings = [None] * (len(self.input_names) + len(self.output_names)) + + if self.input_flattener is not None: + inputs = self.input_flattener.flatten(inputs) + + for i, input_name in enumerate(self.input_names): + idx = self.engine.get_binding_index(input_name) + shape = tuple(inputs[i].shape) + bindings[idx] = inputs[i].contiguous().data_ptr() + self.context.set_binding_shape(idx, shape) + + # create output tensors + outputs = [None] * len(self.output_names) + for i, output_name in enumerate(self.output_names): + idx = self.engine.get_binding_index(output_name) + dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx)) + shape = tuple(self.context.get_binding_shape(idx)) + device = torch_device_from_trt(self.engine.get_location(idx)) + output = torch.empty(size=shape, dtype=dtype, device=device) + outputs[i] = output + bindings[idx] = output.data_ptr() + + self.context.execute_async_v2( + bindings, torch.cuda.current_stream().cuda_stream + ) + + if self.output_flattener is not None: + outputs = self.output_flattener.unflatten(outputs) + else: + outputs = tuple(outputs) + if len(outputs) == 1: + outputs = outputs[0] + + return outputs + + def enable_profiling(self): + if not self.context.profiler: + self.context.profiler = trt.Profiler() \ No newline at end of file diff --git a/scripts/live-demo.py b/scripts/live-demo.py index 45e70c6..7c5c174 100644 --- a/scripts/live-demo.py +++ b/scripts/live-demo.py @@ -7,7 +7,7 @@ import torch from vidgear.gears import CamGear import numpy as np - +# import time sys.path.insert(1, os.getcwd()) from SimpleHigherHRNet import SimpleHigherHRNet from misc.visualization import draw_points, draw_skeleton, draw_points_and_skeleton, joints_dict, check_video_rotation @@ -66,17 +66,20 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, if filename is not None or disable_vidgear: ret, frame = video.read() + frame = 
cv2.resize(frame,(672,376)) + if not ret: break if rotation_code is not None: frame = cv2.rotate(frame, rotation_code) else: frame = video.read() + frame = cv2.resize(frame,(672,376)) if frame is None: break pts = model.predict(frame) - + # print(pts) if not disable_tracking: boxes, pts = pts @@ -145,7 +148,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, "resnet size (if model is PoseResNet)", type=int, default=32) parser.add_argument("--hrnet_j", "-j", help="hrnet parameters - number of joints", type=int, default=17) parser.add_argument("--hrnet_weights", "-w", help="hrnet parameters - path to the pretrained weights", - type=str, default="./weights/pose_higher_hrnet_w32_512.pth") + type=str, default="./pose_higher_hrnet_w32_512.pth") parser.add_argument("--hrnet_joints_set", help="use the specified set of joints ('coco' and 'mpii' are currently supported)", type=str, default="coco") diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..8403a61 --- /dev/null +++ b/utils/__init__.py @@ -0,0 +1,71 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + +import contextlib +import platform +import threading + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +class TryExcept(contextlib.ContextDecorator): + # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager + def __init__(self, msg=''): + self.msg = msg + + def __enter__(self): + pass + + def __exit__(self, exc_type, value, traceback): + if value: + print(emojis(f'{self.msg}{value}')) + return True + + +def threaded(func): + # Multi-threads a target function and returns thread. 
Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_font, check_requirements, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + check_font() + + import psutil + from IPython import display # to display images and clear console output + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdea27d3ba5164e2d584e9fa4441860047b57834 GIT binary patch literal 2301 zcmZuy&2Jk;6rY)0uh$#fNu0DQ-#+kBL$QT`1E^Fj0V1eg8=p;B_}zr%5kaN-}}&=c>COWqbwLkmr_RvhK5slG z!#Z#C72X6HpR)&~w>rBXOmvc}L6RpriPB^i>7>YmKfxB+HYBFVTn^;Xn=P2C3q+H1 zdIA0%GWlvr&e;VycR1tD1?Vmb=y$hG{f8vy#j!HANX1EFyktKwr0BWEAE%MtD`aLo zHC0-`D)PAC!gyTZ+M~u%T0SFk1&lraW-ux;G1wnRN=NCSA4l;>40Ivm(O@*$9ZXU! 
zqjaj1ScTIfEsj18GY}PWFn?kg;v>VbKc0f6tT;#%>QxK~VX_4@Ms|5Pu(=CUy$wVV z0G$Bf4SXbk=vctX(4E6QtZ^S69hkS1v zTwm}@rW#==azHQ)ufgfU*2c1}%kvm@E(Y>fiSjyZN+w^{M8iuD*ct53$WwMoPbr1u z0mRt$PR}v5u}pGp<6z(2bFELt7bkJ3r(*yj4BWP zp)S4tFL>r6fVGWI&qGA>C=+35f-uYqK1q>phT)@0l+I5if-N_YpzbBZm_rf+BileQ zVAinD0>^jkw-*9|vB#{!sZjc7vS&;Av0u>=(Qvk7_XKWs2|H{JQlJGVwXU7s844v&2I?aznvCwl&U-ZOG$gV$K+e6 z9m{F1?M>w7dA+!brj^Yn)~v~QV7K!EgqXDrm|H0K7Z43;WRr<;lF|zLBdn}qSf;6y zxs{XEDmU}08YCR!Zd%X$$^*QhU*f&eIR*6WtwvRc)k}kHrSjpVc1mPkvV$hCV-J18 z;2V*@@t|sy_4C#RIcQe_;J?aOF6fU=wNf>!R_ULwVLW)Vr044wp@9>Q$;2)Guhi{hcf6AuBMssa@w!SqI@gYR9 ztAw76`~AMeDu)0E&>Z8#aIeU<>CAa49!(M{GALPP{6vLukw&}Fa$3-9M65$Dj*?hF z^YO+Cd#0&>{oPcLAYrJf^F%?zp3XK0G3W#x0~KXss1zRQXa}{-J9{XkETl@njl0k3 z;5ofHt9Q4)d#G-8XHNfvy;-As|MTwm_r8U_WkK;_&z9m>XeSrPX_AWx*4hJnR5vcr zjuzeDfBmW3+mM@ZVpsubtwz?(mTmC;eUS_J1IU|jE_+DcMalK~sr^xemY470;VmTZ zBk7}CR*%-xff;Sl1R>*@yvHW(N+H#N;1KT8<)tjbx*0(8sDqG(}aav22`6^h%W|}x;l^@;CiDnSlyM~g>)!8B+TsaIYop;3gxA@x=K@GW z0r4Pa`XFX{FLDowhmm_2xwj$rF7XI*k05tYXqT1Dsnvbf`D(sokL8=Cs$FyQZn<6? 
zD>iEdN;uXgoyc^?^vw&G&Ye3m<0lI9c40Ah&6W-t_>n@rQkRY^{eEdX`D&wL=Ny}d za{O33H8EFT_O0cWrTN;#^2+i`M)f0eD9e@W`HWt&{^|D`k6GWIDt+VHSAKW&@V`uz ze*4s~S6=(ySEf+;maKU6+n2sIRa)7+_h{q`-<&Fa>zn+WD$vw=e>yV9@i=1uwgN;^ zfFu#22_0Lbi>L?*6DdQ4v2@E%o~X<rKKvZkgBW>xjf1XiWho8`HU9y zd;m1q3Vk5-Uh1R07ddR=uIV>Y`E9%S$e6@NDQ|?1jvpDnfA*H@8}oT7GFs_VpQPXB z)N825@B33I3(oR;Q>DN9(ZQ!b{Iwr+DitLY^38d>T$*?N&{A2r^M1%FG7E5gaB*%%{dBQNt+=MC-9Sbvtw z#Vr?{94+$N1Gy@i+VXJIEmxewy>#u2H&*=UWXZ1C%ME$#K3W0cxS?o@sjS|&ogT(> zxpJ-S=5phcl{&WGIX2$yni=$?_-@Ui9h3&f@RQr|KoT|jv0R5!+5i_~#uTPZ-LbRQNpPAc`79-!oo|$D#ky}S)XOzLb{>tSdq&E- z^rL5%3wDF}9{8Q%*p(t6rs|s*wOC&E)f2ut>#H`OOaKnhh>^}tjUeY71W;5{ji?%) zl%7_tp1=4{jObo%znWAHHLb1=zsa7{iyafNf|>3yhA& zERwj0PHz$^FokDyKU5~s;hRJ*wj*~_)w=+;(F@@Lt>ZEtXB41B6q9~h3q~$t?2l_7 z(XJYo1B~Z=&FJzjH1*Yjug>kAyX%>>$3wjpr0XE57G z&@~g1_tLja;4{$E*J=$vl$V5r`f9m>>Z4Sxm8-rPu#SLa^_so3b85hZ;FCwt#i8R; zaLR}9cZZV>T25!cc+n0oI&<(n3{uNiZBUGr%T?>537MMn3@^+W3@4V^>*FJ;omoD%A~~M@GZs{BXI(tO*QLn?37C zYs54wpo@$liU0XV)y~&^-EoCHkNnQb5L>3{Iz)YnCMjPatmyn`CWPIL*~gG@!9Um3 z)5zYIp=8K9JSI1TPZsglS>(!|O&9?JPBGIME*OBp5C+=x@bopJ{&vjN( zg}$K^4S9M=A5?@kth5YQU#BPJDK1ol4G7t%^;W25w!)y|a4Uk|5mF~S1L}eaij8{F zfMzj5ct}}~dXWvaWx2|_#WkRF)PQ<7^mbjWTNk~pE{<=<@Wj24XKiSlGQC(k753sA zT2aUN3Cu3!5lmpOEmCe+@W-Y%e~O=c7>kzE1c)(gLgXnB`61!1H0;e6spzK!=;(1C zx^Ub~QcfW!>7e~^vsNY%;_HQL6Oix?2yW<<*W6R2R{Dukmo8kI$(=kmGx7N4EAk3; zF~Q1lsCYZ{5#K0O${^UPEm5dncxtxO3q2;#6W=>Y{X+AlJUD*H30~LurAl9)Z=#=* zcYXa@zQW&Nw9bYrO*e^34G0w7t;{nnAd;So7%G6hu7Y6hUb0bSEmWfp0JtQ@d%jaWI*NN+QJ zCKh27K^_|*lAWcKN^Kk?o441zS$m#`j(PkJz8dCvL^enmgr-ZFCjE|Qv`r!dSp$Fu z7NsM(P_CAtT0sf1&&jg542BWNji`K@+CB@=IVw8f_dRu`lbtztlC!abwDYdbngCcb zsZM^}X7z5>ejGBO(*b?ysC%;*1Du;qJ{yACH*aV4xsuH4E6r>2QTqPfT|g|;Pt0Sy zTxW0=R&kUo+6V-7Qaz%95nMxln}INlf-e2v3*09Oj4fB62Q@;%@o}|}08%-S#_OsJ zYNumc(4RH}nr}tCuooIpyohIxfUgO4k3tl`ZfsyYH{?ZOK7e(c@ z0kqiV4V6MLPuM7GsaNMoy09K$PRsXU>jLgELn-1OU=T)rx$IpZ`OpIpq|7>L{7ds@2g9Gh_w`7TR!S* zD|nVC@SgBhM?Oi7^yQ;-zP>_Wd4f^^+T46rn_EJ=0@@d-O`h5SEKg8sBEJ)lqNK=| 
z3qV^!aECNP1(ZJu92x{4NI@OA4_x4YdaovLq7DWCNq+{n^i5LiXu%V48wa;6%a%_uvEsSOao^d!Xk5^Fq#(Xazjs zT*Ql@Ulf>P1g*RX;T_`+mC#u2612mq^LQ4O1N(w+SyaYY2;77${F)mA%ZYiAbDWA( z3Ydr5+A+_7=?Pa0=!IGb2X7nx_{N>RT|97w2~zGb9Q$pQlw?i#o=B z&|DqaqAh3cVMrgGoKIwKC*}Ifr?Hnz#iF@SYaPbz2a1g@N-#Z%MDuqg)Jn zcJxNL1ug)Ij2RUnL%}GmHntOv2Pn^q9ymJ{Fd%*dxtRc}+pDG){o zVy9_m;&K@+eXH!0Yp@4v1zWCAfnF`w)mN=&kZe@UQ$H8KGNE}1ZYBgo;Y>qi@@0R2#;f89HLB62QzJxmL0OU<71uDW5#}i8_kVe5?t3;f{q9_J+|?YWq+gr}AJ2h%5Bgy9tk+c>~_S zO+yOTf}=GJw;9{NC?CYvlHPzAD3Z+H27Izq}>?7 z9l&T<&zersc2c>iT|ID9tvyRKp;?nf(4Gl-rDz^H)J6l&K^>1F;7}xay*}7(t>N4{ zU%&H=bve1S#m_%u{>~uqbEG>4sj&lMSBdq8*6waOIa7zkaJzgIE86AlULS%jrFx_7 z_wM!KcAnow(BAr>z*%l+3u&CzuJ)SojS~Al)<*<%6s&Pa4Nj#~g75aUjrO!{*8rb) zLk@Lz6M7Ks;6mE#15WS4dk~m3DMo44%pI` zazv(8rp>IV6lpfO+G%b`Obq2)o&Jyt;GTh<1c#|vfL|Yu&;gXn_XKUbqNp<-7qBMw zQEq|T6mosR70MFg3Q3=BZXL4dbUY3{FpVP2$3vtc9ga2`ptZ+6rHpyt(BNQdwYk1_Rs7kNqm*pI#l2{C!OEpmzsC6=X0udc{p%mM$<43yJE@5Gp zq-{HG7cG-^da?>uRiIkGrpRa*SX+wpnj_teYKX2 zH=V639(kI&JxQ&??e9ZTUJ3j{>dKDe_S3f??oIS{o&!+gL+X%jK}#M4Osi?Yq&BFH z;0#txTJ<3Gf|L3P+^v&HJ)w`NBkDVKxZd<5XfdM6 zSMZGj;Z5g9ia=M^KMe|S!S}(}Az2|$W>_zsfh6yGX2@Ggc~@I7z{4UWW8JW4KsSzf zx@VA{49^*4$1^WOM^HWZU0{0wLZ`H#6_~J(0zH{EWX{723%%JAq1{T+XvN(qtgQ}* zFrddWcY$V}fD8{W!rQZ<$R{w97+YKL)xv`s|F~8H2Z8Sg8o#Hl$GILFcbiK=w-3g_ zmpWz87=kkN!)oY~$4x-iMS>Zx_hFT(4HfnquBP0?LjMM?hORz=R2uw>uBb@!NURS) z%b?wW{V5`gMyrqXVXRy6`U1`d-!W;yabg7OV&E*=;#FV5A;=d2O84{c@23J5NE$J` z!0)B%E`V>raAe^q!->qG{3=zm#>qaVzeLs#HK9d69Qc>a;7U?nqIRqwlerFlRH?|n zrZSHJlYLys{1V+R)SJ=?EYA=Ix_`nq&z`?<;mm12Hdl6+%D{dQp>Gu7UsM*-Mn1Z~<^7ksr6(3K8~hIL!dvzAU5IYdZ1X~rU5JxA9+_V-cE zS56A_+&KPHuqe~8B}b9ot3IM0fOZ^*1{~Mq>u9^p!9+Gk*H`f*V>_{GPg^7%>H^_4 zu%+XhK`Rb#)I;kv z+=an1?UWhavWRO4@=l+0v0N0~EHPCQj({*Yr_(BX=S7eF7UPMlZ{uAaL zU6LlQE3PS$q?N6a`^@s<@KeAVBV32%4wJSguXv{DTQuYqH?lx}Rg0{(l7XB>m#Of2 z!q?$$(V;H-16rYL*EJnqPJ;T9BE6`PT)%q7Q-R?FZ99*}C`swIf9!9k+KI1DNkaE_ z?u%0_z}mU>sZHWXjQqC=T9Oh(5bRFi`;!zb`|eF+aC-tL+;Ozo{0jhxb@GcqvXH>2 zxyau|c{R5Kz2svk9sxGAZ&4tDM+-3Vnr@HO-MTa(b)@vtzUTk*-7kH1NQWQzOx&A5 
ze3EF&nz-v+%+0soOTMuTL)bS~7%oG0QW_HU-)ZFkAaHjV96$=n5id4EJ1~X z1SAH@p;7kRB=!Az3ep6ZcAESl+H8{|B-}}=u*FFJMz~4I#sbX%?+xG}v^mM@{u*X@SmYfDG7+B|%QOYj-akx!1yf_tH^J#6me|@U-6#srS)sB#OWvW10N#{pgp{u1V;|ngf z8d?ehHdd1yfC7I5T?JIMs@_6?6Fgh7g%EjN>D~yzD2n_FN=#rH-$B6x296GWA}XIH z3*WO|Kwwi4=5h69+%aK`H9$6wqO_VcfxyS(a>eA+4`(qgFpJpgruv2S(yODrD#``4oog`gRPE}-)viu zl`{s5WUSDX66RTDWvo_r$DC$^!VzpChv`FX{2^8f$eJT-4A|Qp8|e$whVC;Zd97{} z8pN2`I@^!L&kjq)i$FoGIJD1~foW600z+nJC4BN0) zu9f5w& zO{+47p4*U*j;5<=k{$%R3Vz2m%^7te7d-|1M*EDurv5T+CvXdnJ8)P~<{J44!*0Zd z&!q$}516<4O~)OZvw=IVU9#mN#;}8=wHz9A zn+QqFWGwkfOeAoYCr>yIopQa#aX*0}&L;&62hltox|Swo6X!3UICp0H%xv!BnG@4+ zq<6BHXHNqUCurCmdpvN5$v%UzSjSSdQF#85E1>7LA88TvbM6J$ieLO7EIi`o*Q1DG zL_i?|LyZrD1hXORP}GlJU?=&>`m+28`kj@(1-MN^p>g&0;{@li22s0#%X(MCeG&9o zxtBO`QL7>UGzAhuZ^?yV2`V{U6$9c{1@Spw#3-2v{0L3B5P}$#?zZ+AJfA}|`FUz< zaO_O5N!`+0svkw149DWN_0a>=;~{_|Z4;{_Fyi;YxL-|o*Vhe5+ja*_te1EattK@rG5x_g)6%By=*ePmpR`@37MmgMn5d`7G}{(A)Z;( zNb1xK8i__hON!Ef&z}A6xUUt^?|3Y}!=)SCpadax4RGslo9vdh-#H6lioXIq=u@9uN5D_He5X`)7|_ zVETUyLj;z?$B}B^Xa&YWSIG}dbw(Ig*NkEl2KyE2Nw&KDG=a|q-H%fGBmhk5DqM8s z>QbQYc1MHCMk<^mf1jG(0$^(#HcmjeLs*dH<()xYg_cK37e#Ij!2r0AFKowmU}CUT zcqq1r?v^RfqMJNN;Bf+k(Hy1aC)x;KuxW>S7zQHF+aK>oO3=p*^onRnQ&P##$tBD# zwi1^hZqdsn!jd=}-!X5a9m1F{XCMyfDnB1**y0AJOw-W3(mU`1s409v%qDrQTgXIx zRyqq-K<}#qTcY&N#2!W$BKA{woOysd5j)75Omrr1Tt~rmK;#{y0s@C?ivqIXu%N3d z5SWQSPI2%8%rm${h!{15U1&GReWkbU2}as*;B#V&b_*fd*~3`Js2L7cPN*r#!j`)M ze6_lThCOJ|MZXqpNWcIoaCCI%rnK=ch#8c7X23yEP=eRUxwujX(c&{&bYY#nRt>;ogr)|qxO<3qF$Y#9P8EU>1Cy4t%N9n3iU=2$@ocfGTn zU@?ZI60B?ni`W7g#CgKoK%7l}6kzu3ENc~+NDr=kfoea|1)L?eDf#vZ{&S?EAMwiw zw(ck0*8dY!Qn+9y=19?HAFwu5yl4J7RR4E zzO|KRZ7zG)og0pK{;?%r!5Osw$pt}wPteyA956plA`21qfsLTkgosG^4u@}R*g=(T zH?G>aA1XM3=PHRQN|=;E{4K}1a_lHa`*H9XNBjgq39rzguM_wofgcn2IssCrSwCd< zA24n@rJk*45sD_@rD-}wHt&|mu^jrI} g@x6aPv7WO=tUZy471a>6zT@w|t^2HSav+)bKR(??y#N3J literal 0 HcmV?d00001 diff --git a/utils/__pycache__/autoanchor.cpython-38.pyc b/utils/__pycache__/autoanchor.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fd0bfb59c7cf51f9f193f14f2f21706813725745 GIT binary patch literal 6508 zcmbVQO>i8?b?)w&o&CWASo{(0$*juBC)>xIol`EUIXVX$RjzX7kc$pUzSpx0a4qYS z1-7@Rr+?n-e(!zX>+g+@<`_O#C-0QmDaQVj9$x%dc({RY@FNtGtue`^sPi?hf5n=i zf6X;h|EAVb_%-TQ!&71MrJLe>#2ISkz30(M%G3+b4R4L%h&QUEi;eU+Nhgx z^ItNVeI|CfoR)bx`iSHGn4FQ5a_SLZ8<*lER-A25=5Dq^|E5>o@Ri*PYxN*^0NL0| zk;i6uUpDZYy{mR^KPNG|f>5>dM>8zjWlp2}9}~ayy@hYk zMiH@1)`PP9yeD)Ch4*=fM`)X8D8)QG#ySH1umxzL-=ximgxZcoFSXAigUT&KZHHDb z9cG}Y@l5PsMAUke~-l#lu*@NIrv|E6v@|~U3Irz71X?{?If!EO1i3;i_^(- z5SvZq%h+tVju)HTHTOYmLD`ygV>75#JwpawVXYjLcCfYE zZZ&DuDoYNIDUoflxf4%faCxhgG?cXPQ&=U)qhR6~FYsyn6}Xzkb31)|b@jWe z%l2>SEn|fn2Y)9RE0YR?&p0X>d<}eqUtyC%Iu5=o)NI5z`KF*_@G_|DTE~nGY_Ty- zTW;Wx3~9>Lo;blesW7!^ZKkCq(@|iV*zGO>B!t1?-fu^Y)+!bloVLe*f%;EYU@qIC^FNm zla10*wl}lSbdRxpSpPlI$#q8LcsTndi$-MOOH)onIXQ{nsb^LvkNWhM`2|;IIM+KC zk=M*5#^n9{uw}MyUbNQyK9gg|S!Xmn9ATJkQTD{PA1P(4vaQD6`8wa(x%BaC!)+LCRk~Lw2&vBiB8_j z>H0~kCvuVlXj4D65?c*lw(72;fO9}5fA=|$Gh43Pl(k0i+kg8teSUwv`jda6&wpLl zZb2>|)Y|Tl50gRAEFa+;oJUc`k$-AqTRHqxB;r!IdMqk1k%&*Te-QW0kJxi#<$wks zq)=!dcy7dE@lwo#nAdefQq16eoV!x51!385G_Spi=YVX}!e9I47cD(lyi)hePCdAW z%Ao&{xFjkqsJ&qul`ij#$h3Iap?L-?m_X8KEZAo{#R(4z8!OM4j&m}4B^FAi^J*U2G zx0(>owc$8!-M+B6<235DRx5DraKlB}M+Up*QG;D+g)QY4Tb~@xr)9;Is#dLG%EJNc zE~Hl&6ZajIO$cEv`y*-~TFO1`N^^((K{aO?5t7Qtj6jeq{P2pIBcR<}jv1xSD(&!o|*i#La zj|k%U6XS_lF$gLFavAwLf!-5=)>I`WvoiN6+sTGz#KIJS_#ZqTqa5o0<1q91+fG_J3SdVD99oftSKr^zSA$v3w zy-_(KCjrT0fHbx_K7iw=scuS6ZLwYfaLaoW2yy^6z$FVOdsE>w;AlD;BhUk4^7p4>wa2I`Ch{0m)-iVC}YOJLL^8uc;r-ZNQvd@Bcuf6W>F zVeJ!I?o$bXE;Dr~+aUzKMn<~X{`K8 zL^NO~nvuqq=)Jnnz7WcTGzPUtlUpn_2zGF0>CH3I#NSg5x=Le?_0Gz3a%x|6X1);j z7otM%HISg0X|{P5bmaUV@61A?*;D5$ZH*v(!&8$X3w1Py_+G@TK|8h^9*C zq7>FYO!)2lI6W}w1?L@@w|z^wh$DfmeLuM9Zu|9ZR}O-kT~U6+2Boce z6yExdpUc_(&rMZD=0K}=fHem(Dci_X01dzMjpX;KImG*-TTRFfCgmdKv8-88d z8whu*=_m)tiq-M>gL8dm9-jO3>nkkS(ycfk{8!&vyfm~NAZyu1;p!#4fOa6+*nvWr zjrNoJAXTuPdKI7%ZZw9b>4P42QPl{Lf!L503kF9Au<8Q7yy4y7TJE1_f6>F!JR0`B 
z06RB4N}se&QWlSr6vAj#IGVK5$&KI?`1I}qB7Nq%78J2bUA)FyDr>W<= zsDQ75zdZ)&9O809b2;#|2>Hhlmot#17)NZ5n3+0XusX~MFGxG6!yAvdMvy2!FNk*cDE^n0Q!cEx~L zN21HOH0PWK>)fXlR;+-;rzD|kF_kFrmzX6^YZ`g^%>>3ZkuhuR1h!E4T2K7X!6Z~hV8T8pxp47yF;f&?15(14E*<~kbdCWEIEgLglj+% zo)Pj%ThYN8b&W(<(iED%@YEI46s}ClD*+IACk^CoW`ukbzgCpe&uRTOqv0gmW$2j$ zkH>6s1ikXGfh^j07-+kv1ijAfGfiC8SsA#*`fvBUzkTms%wyW7uo16`xfh1 z_x~m0>USD`E!gA4W28OFS6dY~u8Gb3mq*CesUc zd-Bk`>2`o3ORg=vtL>2mxKYAsueL`Q?8K%@@aL;)8B>47m)bKIiF+;4%pjB)UZR!T zY2CbRx6@~93;4ISw;lWsu0V=eMb3n)4pjv$3>D@5N4BFmUYt^X%acb`ub5689~v3{3OD&A zZ=d-F^BbBVws0NegNcLUyGr3s3bm;ZfD`Xiln-ut@&nCsk$({!;SMY9w5kL?v56xH z)d?*AHAnq_<-F=8OqHnm%3K6add~csP8Mpb$x1Li2T=b>*_%tUUzjr#yxbu)hGi z(Vkjuc^a*?P&A9O+xbJ=^gX+sgY^PZ0!qD3Lmq6kSp(>`u60q(jF*SU~YIpf9b4i-%dt+z~UW1P<(pzX7GYkLXG9QT+g& zRtCYHVx#&L1zx;L^*<5P(9Sm$7}>f?aQ}`N71GY z(oLNi-s$80%>mvoQ>IJ-8{n#bh>=Bpw|IW75R_}poyBs!MOTN5+xOKP-f4eVw^568 zHSh=AKze0lbU2sT^qa_&O-gs;Y~WPf5>c#pA^~*bS( zftw0%N6|IFQOrmpiBZS=76KEoBsz|G>}C+Sit_H;CbKh*6GL#eFQY(*Wz&>$*ikn=&q9QTn(kHKiMri^mj^2&H3^ z4pABk$!L>9q+N;)57{IEpsLU#uf$o>za(o25{KHyyrz@T1Yt$65sL;EYvNGV-|BJO zvDm7{qEgk7plJQ6pqRk0ON$X$st-}n-UfG3u!+2|ct&Kbf;l^y>u0Ew2J%;9l3N^^ ztSL}V>a2AZ_q+uJ&RL!xKaN@ktz-Nxgwv!(BgTYY0iH02=@libkc+M^6cHNTki62F zZLtuiOC{--OQpnYQef82oy3lcG$5oRIR$Mg%0NRrnpjQ3VC#t`kOwFC26Pv#6KgF? 
p5+-$UJdBWW=1OvLd5xwHNKDhjK0uQU^71?{h>VfR<$3<>{{jACnXLc- literal 0 HcmV?d00001 diff --git a/utils/__pycache__/dataloaders.cpython-38.pyc b/utils/__pycache__/dataloaders.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c1e9c3f8684fa517a1942a89f812f17d98f9dc0 GIT binary patch literal 41659 zcmb`w33wdGeJ48I({o}lI0=FvMGh&7A}A6dX^N6)N+QKWq(p%<30)J(;{kdA4loB; z-QY29*p4VilsB}vdD+CVrDUKl@mk5o*__YWykrxv*PEA3_IZgww1s_Huacm3<>?(Sp=e-HORKK8Z+#JEr z`LY=b*`aL6Hf*zGWDU8RSyQgzY*?;V){<)^8M8eTdnFz%^_Bax{SuFr2FiokL5asp>&ipfA&Hx%_1R%5 zdqZ}ETt~_q&5#i)Z_3^k$kQHa)` zv4rm*(~mM+k0@g2Y26wyD@tlcjLHw0e5@t zeYo3)yBF>K_PbE_dy#s;z8|Ug7j~e$T;YEE;KtC5VLxC$h#KG5yno0(g!?@1-@P$p zAGWv6gzbmzN02sUKWZOAS<@)%sQnnqdJHw#xI1Pa$KCP5=DEV$3`(E1pRgadPvM=5 zws|2m`ebcO@_63Om#TTYpqxy;f=|Z1Q*bkjZn5MfZ*z%Ej2eDa6&4D4chvN)bJc3e zPflJ`g}lu;o?7Sf?nUO(sffiFRcRrwoC4ypY;oaau~Zlh`|%T3i!+n?S;vo@F6U

;lC-(?paR~4rg-Ga?p-Ai_PG~uU@mZlcVg^HVZ zi`9zbcjXsnd7HcFT=v&bm-3ENoVk|Ecjg*sK5$jO;>=W4+3}Op)rD)hg}mby{OI(e zTU~VhM5*Aq1vOQ@>PO1OtBVVMKVGn_<=g@~ZJ{vj7B3e@d;HY#qmxH3oS3|jJAV4S zA3byS@yAb`_q!&ZekwP4>imhL$1nKZ(-#ZV^SPjp{eFF`3YQj(s=y|*1(M}jzFhJX z)1^YblEXLq;pxl!{dm#IO;=0#DL;YOe13MeRPfVILB7`6j}{gjKYI0AW&hP{e&Xu2 zD;E(#G|4Cvb^PizM3I0l+~ii(^u=6TzvgNMRn5&5mE+cqTqqPW7hQM3IW#^#TXZij zPK`}h%j1hBbXn<|Tby>ruT@Lc%lD68Db5$iCsn>!*>h~sajWG$9Kg|JHat~c0MM+= z`qtb+Vb%}NEzD-a<%McCyih^REzV>u1PHGbrjT`Eraan}4dG$Vwx*m4L9(VFO} z02nA_v2gx&An-yjZbqS@ITY&|%Z4%~jtZ8IX8`*znKp`f%b36_GOLbnEnq$R(ZW@% z7srpy7F?%TD`4@OV|#sL;jNH*H%fi`squ@|a$$Uy^F3c0ADhl&@{P-s7{^$*k8@6s zbFR##p~eL|jJKAiGq!*kcX+l?DO_Dpj~ryfqX?{!8A^p}8_|N?#k_NI>~N_%oi90$ zj5YK6q0zV>b{5Nij5$ihDGaZDKaw!*qlL8I|nv_pA767M7+>ks4XQ*N^rw>hH}gVDG;g$rR%#VuTQ*#mKBabZCf9LGcY_fDq1rc;XXc-;j8Fe9#mQCpdbKX*Ot~nRR&%!T)UlhL>esTN~cKG#J-SAAT zhp-p6t@(tSLs>~Lj1+d9@i4-A*D^akWk(L{!(E`lx$si)BWqp{nB5Vtv9Z*y>N z6B?-}8H+K-sXO6YY{+Q0EPt6VVN4uFD%kHidFJ%F+?f+6Cv)ddKYnV`Po6(Jd35qb zZr_7@gNKu6&*dIFJ2`pwDL*j@5P9L;*$XF{`Sw4s7bF0apE+?J>51U|hxYo3=I8h^ zw>ovr1x~Vxm6rq4eKwCb^HkW!_ z(@S1AUfabF^svA@(~Ei$FX6>J3-By_-SFaG%1hhfo2jMV7SwVhp6N!tUZ6BAa5pv= zuP3nV``n0|#<`u&7}b^rT(?sQ!(A%r9n4V??b8Uymj?{JMIpw2Z97@97Jx@>#6$QfE}8Td;Q*k zJLq+*KXt=%>naw)A$PsE&Q9D+c)c^EW|I?(ljwtIkDi}6J@I(vk3L@ii_g54*|SH1 zLdz7LOcAK03}jK9DitzUK+R-GqG)E;jw6>`%hP+=rgfw;m8dE4)n47n?X;_OA><0v5eO!2C$ zWx)Vy9PNG!gmTb|Kj(UW`sp9u#b1Q@F*%iYL8lRDs}JI#-&I*G2U7bRYCLBDDMW?D+1G; zqZRv1eyV^^3KuIbP&4pvxKya9GBaA!i+0{m*@er+X?%EfvEr)x`5-a9u&D3xO8%4H0=`{w|7+hwsh`^7?*s1q3;R+Khtm1iegsfdhd6IT%F>k9S=8nv;Vph2Huc+(H71jKSdWHE&S?NINkG^T}Nf-sbsI$!?gG1UQ)JvFU;b*S;kLr}K z_NLQ#z7u#Ea1YSULV6>Ant8rv#m%p&A3$l-EVBKt0lXY`Ss?^bMu=jJZ1@#2n5G?m zISw&~WkWaXWWZtMkqa#-wOFLSxph^seI$})Z$ zoMX%ejoQZ56MU@EwvG+~jt9vcfOM`6m$Z;E2f@aLkRlB`8dl$li>fo=^n1%tpJIH) z#Ea!}Sf;t$C-KZ-ks+ftvSu?RTSqGz7?V@V*@nOkxrQga4TLGZ#F}UofV?mtDGq~f z8&Et}(QHAQ=v&|hT=g&U{-`N!^o^jSq^(>Zip_fqKZk@&$mqReU&)wYXfdW0Of-y7 zGM6h??L|P`oS(|&uvAOI6MVZ}oyMrVi89rv8L%F|I~cF&LaEdkGil0?FpIQ@ll~L< 
z39Aq{tVk*n56eZUckeL#Ug2Y!$ z7wM^==Ic_fQCr`+Hr{!*$~+-8qG#)M;{@tHMaHzP4GCK`L6GeMRm1~!UeW!kD?0FSxG~4FGhL93s&Rp%8npU9C^MA9{`9Sqc5A*XwoReh2Q;a^HtKdhko**Nb1D-Saxz)h}qEcLJ1# z<7k#TB9{sLX(}s=4qp?(kz#N-b0K0$Q^Z1c%!cOjSwjG#z74PbFE>v8UwB*nJOi2QtqIRo{YU(otrDWNkT3gT zw+afjRG4x7$d#f^QPD-AO-C*DODyRZ8GM$(ze6xOpf)r9a|nQ07fMAJMafjxsEEA- zY+0Vw1S+X5KPhaWma1BLNK$%(bF}-bC8J#m^Z-5HjOnEK@*0`ZtX0 z2pG!Ne=?jlHsimSNNV^_1Kcr?#OD0TS|?ngJVWM`Zp)pZ;O`=90}!uOlYqgFnpk*O zqo&UxLKYBfL4kJd+fxIN)`8DTxi*iCM$z8U&?)Yickgsp|wQ4+(}|FW7n*;+;@# zgFC{Kq3S#W8S6#}V}Os@%1RXLE0o)W^)_gC&0Dt&r{vyc;~UZPZg(@nw7caKp*N!Z z2Ena86TY!)k_->T9*dBeUM{>H-Zi%C?VgNX1?N|(x)gglZU;;Lk$+^JB(tJ`E<>T(y{*Z%7A|hS_fPQwiVHbtyZS?ZK-V*;kH1sP z)t8X#>-e=b`J|ddg21ubT@zI;3U)G8mAMSTQk5*jBJ{PzN~X4TEOP<+f~taEl$=DF z97SFg4kfi@>JR{DQonU5Gn(;}MM$ck@~KQiqry*zXmb%f2C%x6A+Sqv==>Oz5Y^IU zJOZ>SKX8@`zec5vb)WkZt8Eg)2GvB4|eQ3s3<&PC-#iqO18M z7VyUi0^&j1x)OW~1gT60 zL;%{Me;w-c-(D2?SLgVJJ4tY-jF_Y|P+fCtKJDGC~CknnkK z9ezXjt+%^CBZ2b)w->Ds+uZ?;lolE(>Talycu~Fy$|@>v#=VG8M1aH*nVNzoYkHc3 z7#gpcnVmIrG^4(NF?_pMgtr;2y^JirOifR0tBF(#|#e=_!)d?OB(6_k(w6Mz@6S-@KSA54do9O6@wLw;1~ zen)MU8&E-f^0&BoXYxuYHuYbS4+0d@QhFu-I7>lYZtilvq<)L(B(KzOGicE`vQ|m= ztm8U(5Rnj8r-k*I!Wv8?PKhcdtdfc=)~&hUV#=_w2`oWCf|)DUtERoCxjq|zj9d80 zdOePaUazzS;9SDm4T~5vT#vZM97Y%OAhuC+@1Rz2r=KK9WgvG_q&IJ)pwTYrLfv(# zX9)d|N9wv{9I<6rfI>-W7^diUeVfJDi5v?yCF>34D(P|SW-??AT;9Yzv`UVhX zSoaNjBfJa+j-;DN-`h+F%@w4Fk$$R~9+7mdR2N8i`=?Ow?JNSwhDdJ&^GtN$1%IHa zp3EJC{l}Bfo`%kHq%D#CbmDyTnaZ;yEwfx}PC<+edz_pu{RDd@jTb^DN%h9BHq_oX zt?a9SJYLt6hU5x6p(V8Aa#U-9Gaip86`xGw*YU@1I)b0WIn`tt`b3CE zmO%^AEW6823CqxJr-fzcv3rGOAQ#y0ck%NsxPTdD=j|>~0*s8g5H7dzKC2mrKGaF( zA`9XYquQ&Q62+v9$9e>(r2s}u*e|-3*{ihf^bVSy(sHAED>M)Bv2!Sd|))9 zg0^BFS@5_=#yjyw6zswP*#(g7Nf66lao5>hA~cLZXgIXg<#pB9djK7C!#23Co5tJ* zl$EHD;5)wVZseO4(@QYThJX<0X}7!SI_g!MK>DXi=)0TcemCxW?&T4G3E`-jl4lKb=a0CIxOf^kTyzFSBD3IoA{ z;oUumjfplpF-uF`!J8xn`sCSoeXq2hNEkJ3Ao?Eix)HATNEi$w9p%zO^b?jn@cSY+GYZinL8!*<_L0L@Q)jgy5)I zV`F2P(QvKj%9Sf)*I+ieI8~snV6ChDCN4K)Q#3_`joQq$+PdIz&MrOt@czB~_m1t` 
zKek`(M!};-Hcm~sdu^dmduq=fE&j{w&A?7@8m2Uh6&gutrUw>0$7mQimN{Qgi1Jt@|e-)eb1C-Y!r>LmKj1eCwII>Gn1`~I z5+DfAgzIWDXqpNTGU`iseyx4RjKTsp52kv40c3{t+kXfJ+Ww|1d9PbJhR{^p_XE< z*f_%muK;TclEVc>4#|&StU4}Ljan`~fL`&#pnkHnw48!jq92C&GmUiRz2Y=%u}G(G zW21!Ty_>PE4B~tlhP`JoZD1mFXIk#r)5j-I`Rke#oTl(joe=x2R4e)11$B!BCs=S~ zrc}+l5WUkL6t!q83uWP%B5GD7RK)^B6%oT+z%+FOF)8BH^Dw=v6fZ3n)Ck|X!t63d zgn7P!+tEHvm*36jtpR9d;!yZT#W#w92o%H0E)(L_H?FADEc70xkgV5JZX<7-toGBm z(2jRRn^M?iK#dJkDCrKOb5Cyh;VO&L3kU9k7uWE^B_cr#e^G)jmY zwZXOE-88{r<1dZ~?~mcI&?574B?k(w(WC|s$U~@l zpe|CoFpi?#FVK8+Ddt607Uoxwczed-P-T}Jt%INtv;v-4r&C(j$bomoN)l+NtDb@^ z5-8$QuONwtmIVm)v-lpga>Y^=-HE`9l4V$73U#iB20u; zbrFi6h*K&Q7GPcSDQ10;ZweDbJf|MOt#8c0ph<(pwPPa1%`iGY4G=K(^};BC-H#dk(Ov>eniE?JSv6l?w)CPHsK_|nR zA(lN8R1w`aMiRy! zqV{MbOd3Y#zHw<)v)JK+9@#8Gk5Ka72-UHOmjjg8Ae9855ec@9coXrx-e}QUq|&Mu zHA$sa?IB2!_Uvf1N9_7N4k3T+@NffZ2c+Bv)D0~UarH|0dZ*@oe|vKSS-Pd^w&z*Q zQw*MC@Cbs@UJ?HI;e4s2cHmXbJi{cg=ssDv=7(Wi?f2QhYpQzfXsPsU5xZZmK-12w zT>CHs#I_kN$f1T!-Onepj8SrwoR0#tDK%|aq%@dnQ;VS4q_%->@weoxe~`^!DKME5 zIc;n9iLDf54E=n6CG!cWw!$vuO_ZT=EJ225vRQ-vc&W8jHHEboST)-X*80A9FPQ6I z#q^GugnL0uvgjzbCRYm%Mz~vYX2HGe65+iWdNuT_L2hLRDlg5e5Ilgn2Kx9KN`MWc zSb28T!!Bsf*fHG53fh4cgr$w;jTs9y4giI013--CD0X;#NY7kMTA65H#G5ktz$JiZ z2i-|+f!c=8&{QY(r8s%~rgXP7SKW`WGl+p@)!hy9Y`?S;8?>$tdhn1Ju3?PRHLepxx-LT;cgn)I{qjaKZ2k0od}wu3R4Y&>z2K6pQ?0;N`xa?F!e8{GNWQ%$gO`3g|{C(+BnU}YFk#urG^umI` zHUH3!)W#nLzx*hfMsG#|x6KP&VK1gKHU$V#+~Mh*QQeQwN`-I_ zDyR#xb{c`}>nux&dP#&a05+4>Lu$rN0NaFbq7<8NC2M~SEp9h$2iz1ixF7P2ruO#r zrL^1aK`615_R{qH`qn4C1#A2Ilhi+z>Rb>*t2oV>x);SPmEzD6QjO50?YWYMH-Z`VM!4m-<9V zgab(}?m*gzmQ{J(8cX9_Hli;!LOe3zZeo7BTk8m(4OJeqLhfDjuxe7@WrUU>SgUP= z2!(mlqA%$2dJ*rX&h-uGO(-$`+>3hY7_3Zxw~PQAGHT=n)U72=Md%cU+TwV9o&GvpSyP~xT*?1wc zG_W+dv~Fo=X}#Bf%T%9%cxU~5lre}{`tZFQya9wG-XOw_-a3Svydi{lK^HUZ?t*qY z`(4A=KmJJY`ptRlfJ$7$>Os6aZjf2h}u=}9LHk~he2-O#ZamYw* zTcvklS@IG9AjW$e$J@S}qX#-_9PQq_9QtVJh49jbr4jV28MSeyU^H8I8F#-of*RH% z|AY8FT-Aa%$3(9&JrCX{g4TaWNz_YrTsl=vuC(PpU+ 
zVx#&IY*fAnyRo*oNAYFm+yVTs-B>ho-wDnmgWuMrZQkZvmKyW6lJTNlTzNe%J0}JzfT_3`(0ZC4b`?@^(JE z6~6Slc*nXGQ6Izg;L>()d+_cKX+`6%k9QkGOZRy9)Q@2W?|`VyumaODPxDZ;+caXFS#+~b#ViX9)B-6$kshBqUH4xKV{#CZ$RqY#-ImId+}RQ_5F6= z5>^gcj(m-)$+w@t?@5T2cVk^g)Su$bGZn({rC)~5wjruw7K0Ti>b9TGC+@s!Z zh-0z#D43nM>^(QFr9IvrXmwrAp(O}drL-t?oV!rcX^wFHJ`?Z<5%u=uCT~SAWxPH0r^)jO zxBIS%lR~&cws{Jut7h)nrG5{QN%h|luA*Lm)K=5m4f7MVnKTU05hxcRw}2lQHQqMI z#v!0giY&z8P9drqWxt@%E8t;hY6wB%3VP+Y8bpN9D|;zm5It;b6U{pdR)2r3Paq59 z@V6fUIICTEu%E#P)S=dJL5wV$0oN4FyEAVmGvy*Y22^Gdk7o+y1^1f7u*Xy_F1XmnXn_}+ z=5|1A>G5O1c0lgYQaNpX1A&`^%|`?HUR_0-vChbDh*&COG9?p?+dol6Wp*sHYWjZb z>?<~!RTn`0J|yEbkLB^b?1UC`@g>}Tl>^wIr@@dt!_lT}P2~~P2Ep4k+AE(;<03tA zNXM^g9ZOn+xz-Z04m|{ogGC7Iyrr9{1wX?4bvAK5#;)1K|D77UG^Zf;MXJDWT%)$( zf0Ei@AAX7z+^Log|3tN5S9sv+;f_^}ykk|IP8aZVMi2-u*WXYZ5e_yaB7lI_SY_cF zrW4=ry9fev4mL;@uQtZ-QGW0W7{MvFyS)KeC`D%;6Yags{XPVK3@LeRz^wHi!`4Zz zNo{bI*-3FhsPJpQb`(mFeKWQ2V6z7I7fSsZQyycGVPg+7u-IaV#8q(t*noEL zrDCPvsPlYCK2r(2{0qkZgn`Wmf6Lf81b+O~i8JSNPoF;{45gak(|HCIlECr~U{ChM zl;IdeHj)PLOgs}bHkWDuqNeyBd2mI>&QCse^n74Z*b?7sE4*+G&5c6eu!yD>`5GBk z?v|`fLxoYH8bjvmZ!kHkbu_*;U$_Rn3iJh4xLv?rb?oB8)uHxHv=V+qHra`cR{a=X z1>}QA0+{d7NKkZ0_p-LH$}{eegKkd!Gu(+n0z#rfvE-+Mt$LyvqG2@DE0}8(*Z{>_O;UTik6bvc2cY*!xrA(46)y4 zq|-_(Ds_&8+>bq9#=t1l#$QcK-S>7|~f-le{!elprnG4+Csh62j#!!-gcmVR8LP%aJN8pBTe z&v+1~cs;Y&j^U-inhmPYd+~a=Hz=$#EIB-G{)l)MLRi5d3~W!{NO)bj^reNCNCZ|U zJtW1!au}+gW|DTI-cRFqDGR>P#J1!ho(xDIXtT&J7p(pp5$17eeL)2mwtRuf2-|EC z>PEX`jjx`GAiW#uaa_~5(r7Yl_Z$w@4!G;lO zp|z<eF;TY@t73n@z$wA-0*z@ep+3g<2zJ%?G4^o=q-r(jw-(^2O#Kk< zfweR#>YJr0t<)xt%vu#BBb<>CUK0?X&dqR7;jH=|7PNxesUht!zSKF1N<$`rA#@bR zCIG#-Vg6&(8WG?~)g(IL8 zZ4Ha6;rmU<>g3(Z*O3Ey&Se9#I}2$5ya=&R7PTkT4@}q>K7}2$-6$QK zd2uH@)%Y&KDgY+L^K^;=8BEfP!wH}1b*hTq)_A}3Edg+8l<305kANInS-ff52?>)DV($)8 zU@;EkzL@@g*px%6Nf|lb{Y3!QR=Me*HP+maw1QW(%Z62@-Ph3q$r-eOQuq+e2+@XR zv$GC&7^$Y+FRd7m5VB-@-A$0l&G8%J_(A^qh}f@NkRn=W8}4C)-)P%<%JhZ-@`FCW zJt5*6&AsT)3(jNA|a#_c3~E~!a?TBj##>60Z+lKxLhpcv9AdfcGHhwMEz^l@#74B 
zf&s}e^-~PK4Yf?p{=-{;_3KYfoqH65okL8e^$ck15{UkAVZq5p;Po(fQD1_)%cEi3 z&qBBE;L>s~H&3{lMHL|^u1UdK*b!ufdyQSz zaCj2{{wCbVjVFK-=+mV^VMpIi!jo0hc*KmuvLzng1WK7I@J!Y8vYBEo;N; zW*a(SHsN&?KY`hABZAp2FdJ9w$iplL%;q&rnYP}iV+MQqEy8iEYpV{}j1^{9GPGEd zeY|vkhdnaRdtL6a41UB3d*A4eh(YZACa7Jl;0<9m2!$FXXh{5LpV}Q-rk$E!=Ok=V{+_%*?1A+~u zHogwzpnM&OJvingF$bmhH_!vx1cHnBEdx%P;C)Xmz>1VcPuTPR5zaBeo&St6;p=1u zUg7Olz8_`mZyA%BDiiz`Z#lvJ)M;)i$elSoapLHCKdJAI!V@Uz8FA(G5$63)7IG5- z%=0-@wQTIS86%(Rn==*l3(WIxd1rt-*DYN!`xKYlPjl9d;MI^x^EmEbp*>VTNDOjV z+cEEIo7b8(vYdsa>zFho0Au*cq|pNC0MVnv>&7zn&b!=VE~ag6gzhcpp!B8b8Yy(y z!g2ub!!lw;Cw___6VE$2ILU)AtgXnTyHnO|U@StOS2Noyh_8?0!RPs{oYCBO0fRm< zU_LF|_l`E4R%tRhaM0BdAas0TgRkS4Iu{Yn;$JTSPN7%BuY{p=f^nAEcN$5(%j z4atccVLJ*XkAz-)4&Ku^U2W`1nCr6RnCwZY7$Y}82+^b*r7;64bnl3z66#x~>ex(|XqzY)dc+&`1ONQDB%r!$lJ& zQ{ec-TVZWIE%ZqYR?F1Ih%yTT`lnH#2ACq>qB`pfgddb2ik0F1J3>WGD$R5`1abqv?L#PTdC~V!W zO+s8!BGLG_zLrd`O*je(peOsCF=P@8W(9^6ryJPBNv59?QByFXg=LHF(}YcSSQCI` z>;sJWsU}9m7H47LHAXaErBf@qxB4{wjoyHBLSRG*fe{l;j2HuU6pV;%E)l?1;61=k z{zrhdtQ!ClGuEpx2&=qT<2d!7{%s`s&Gv3JmQRGia z{-4p$y(scWI^U5rh&kHcgX0ssTi*M9_CPbg?h(n$FRhFrpO}Ct_GmMo$5>ll-ZMYU zupl+oME4%N10S)175jk|d%eB@EB1LXivxa)d+8ftP)oqJ9t`Jn zOw5gvpfGxw>h)-x9?U7QESHV|=Nhoh>=Eiu%E!zlCQo|6yuh0l-LT+oVBgjq%LTO- zqZHi6Cgu)29GM0Fi$Emh4L%5F3p7LnM@Q{s^VxuYhB8pHH!usq#;c$M&>9%Kp*3Oo zR#4t6d#nS$$KY~c9b9}$I}sZc{2spaNq4A;!`IUU5F@nSh5$#d>w_!AF>=2_uKo7N zDjdFXLc1pZPXJrM!{dZ)65u{0_)+s%{}1kY}PD~@j_vU)QSSKwTO#{TExXe(h%RgxR5okENDKzlel=Ojng-;?gKG`-_`xN z?r$R}e75z!F=!DMQS}E*5bEOh82fz&oz#V&YQymlMcJ(+RyCPHA|u`;GwA;!F24-Y zH{-V>_>Bek+aaGCY9l$4;oTrRhK=nYJvQN=-fu{PP`b5>09jMf(cZX@iA1)5#u~@) zb0!b~5YS}Tf{RQMRKYo|HgX--lmV5drYWMN)}}k3%v`u@I3M|ABjlj2N)CG(+3$70`Xrlwn9SjVH9zmJxn;Ecd7N? zLH5#eW8Lx9y?8H)pM3RaQNn%ZX?2trl? 
zX$8aC=qzY2;HgHsq&CyLBwZXtlVgTo1D!tw-8B>s;2{4JYLjl;c!-Alk$MEC=G+GP z#}L~=&f$S;tjD>jk=_(&tWQ&WY)6J70-G82^Q0V(g3V^kz0^~=fjU_)T?*0(C_)=@ z2%e`wJlc3-!!=s)6f{obDe@$NYtxcTN@ThEeQ~D7JN+i!l4tU^JVOk01hwyQtOgZA z6t0HSLOH|`gEsVM zu&&_9-O)()d6Y5Qqo=XR&^e8<@f;p$@!F5!M)O|ANx!Ql>@4^R?4-vI-vT7*`!LP0 zQIj@5U*@D1eoB_a{kVlF{f-Oa0X)yboaaDR?<6{qm4jIhKrJGsJ_oYq#VWvFRl_8% z8(RPDQ)?}re}T3>&&9JB5iTAWlgJ)0nse|7h+)J01^^3w$!}nfn0eed4zL8bTQC7x zo4aW(vIFp^^Rw)X3AEwl#7X?E*d;T%e&s$c(MoT{OMbT&<5Hx1;=-i*9KWMWryYCx zvD3%Yzh_E15d9vVIF>zo;XrnP$`SY?r-T^}`g!Jsqx}^VuBjT{(Qc}Li7{b2TjpDQbX+oTo4Wq z@=zRh6xeCdsbT`o8v(_+(G_o+ZxAj)hN@Q=wxC7;BcP5z9;D56VN(R;$(POtTb{EK zI*M~^vGHBI#^H!in}3g{#HLT1@!ikf5kO7JJ?-3hXc0v3Ww?*Q!DHq8d;!J~jy7$e z`JW%>Hb(IkVA0$F<{W(AkJGUcZM%Gn58Jw&EW6*wmSB^#+>4@9T3IB1PQuN(ZGUrlbyfN)LL_JVPnTy z`Wg&uTIZfc4b(}a9{Sw`2&8xu<@GZD+HPoS`sVtzFCUl|4YYP$kx9X`A^1CkX1--e z1o0wV(nw1;1C-LU^)R&Zx(;}fwNuc-W1kel(A)-0kZyd9zS03r`i&?wNbuk}H$o)@ zb~8P}_}oTnxNnl(>uP*CbVnctyvQsVH9=Q6G=Se|_hFwzKhQuo5ED|UL-xAscX8ho z<%{*r_=+v=Fc4(7+gHyZAG~+Y^36B|{Nctu-qTMlyFYjuk*9Kxr^)(O^dgM8Aovy= z0E7^ZlHn9RKkDZlS;l1(yRx>~gWLh3wP=_A`qC+x^6Mu2bZ}r@5hMvTRI=>>qQUnP zL=cPUeZCqMAgyz*QR+xq`~rT?Ac7Vx&RyDVoC;W}&*Ax+y8ym~2d{B*vnXLqwqiH7 z0$zggEe@Y+Mh>73>FdiF>#{tuKk)e6rJ5C`S3g+M#}RvAKt;2ze5rf%$7>(I&+Xa zpuv$CSc#%=G_7IMUot;&ruq>ELRt##en?&^O^HFQ)_nQ_etmbMzB}53H1H$ zj*<8vM`;X|(62QODM8wUAm3#4hld0t?G@0p6Y;b;jPwx@>zfS~!$hg21HeBn3PEQy zJo{_E%D+E|Kds-9jA!V@0eNSj&WABLuP$f{`#Z$WaiT8EB5FWQ3hgJq{@JO!E$8 zCQ!sPE>t8QjQW65;8B<28QJMa$OpqhDzjii11S083?L6(Zkg)yfOXw&nlMRV9V~SK z0a!YPQmidX$50yqd9ItU?SO?*vfgX=z`+)M(#JsJ_Al!id-d@}El78%Zudb)(Vw40 zmt>B@5xN+XXY#V!1!s|9Uh6Z!ij{E~ZI9!;EH@*0c!U|8;9xpsh<(QCMO_}`c5xqd zM)o!&Gx&Sjfz#4Mdh3iF%{SIMZUzS*;j~5NW_Gy)?=FEW2_1OKEpTJ|@VK*-4myG`gDA^|KOe&EY|H zI0>s-DP2P`XK~5kh?z!}a#Ry`%rrA*8Xqrh=s>9rFy~-0wkeTl$2Q$~}IVD~uAAU^mpJX6Lp+!KSL{_gD&MK%4Ic z*a^jgt`QYdZJ=2~4luP?;UL%gT8ZdUN$PJUv416PUY6UDA3sD0+jOVSMUHtxe#k{^nNzo9BY{r+6AdV zOGJC|Ohw>h73M z{dj?N9NvbVCqgd-C8!7;wBa`)8;rlBFGO3$-%vZ4wa<$GeX`ed!)FpQ0p0<=5O&ZW 
z61Fce0T-J<#EyjEgBy@f9MnbiUUM%o2jUr%H8vl0K7yw)rZgyn##5VK&A$o(2o8D% zY&s*Z0OM2>lVFCyHlQLD24NhWM(PjKpF}tu1;UM?4d$H!dkTeVxdX?3B*yz-svd7a zFdp4jhaC!-f4Snyz)NX(4>N$5+yS^R z9HOHGor?Q)_Igl;-EeczOno)v4%Fe8ufE>i0K8zj!%+UyaRmkGDVE_e)NiKR+KJuz zsG0VVrutzoH8&!?J3^(0^9A(Z#zq>xaZ}L8DJVN4^^HP9eieDJJ0F&t#yo@y&7Q{j zMx+omMM*jh3Mxb1F`ZL*XEWYdkz;iV56EkkiS}a`IeLGKow-T%7|a@=R>x6$URrD@ ze}M)C5Q$*-IKJj1BKY5mK7vUC`}5WI@!IC4^}EApvwb(5MaCMKdL zR;B{err@msLL;G+X$K2R{Q}^cyBTKlVRZy%^L;RzKZr76^VOUu_izXN8xi={Cve4o ztAyD^6sGsjhu$k^OzKXA1|rq-1?yl-!2 zp?DQ%&(+oyD}a8*0BB)1C4HP{0JQJp%exTxrkc9_bsXp`1h7~%f8%$2>xpwG9#`uK zU(9Tmvp6M(DFGJxF&-CHbgwP;;N=yT__eKv;QvZ?duCVwjy%JW#tJaAfspMEFafa9 zBCZK#+Ew~+(p%+^d=<&&SsGT5W9(c-my^){I{x2e18pI#KyYQ-Go+P)B=#c zR#U!&r@zr?if9I?Us_cWF|<^)8xa|o?bLeEpy86oFP- ziBgL?d8lt%i~>AhXsZ8?+|qA$v`|Xg;_zDdVem7E9~ye)OJ^JJZ*)mp@c@V<( zH)L!Oeq>$?jZMZl^SpYQC#LG5mX66B%0RG*BaK0?G}fSi1-&&3J5;vF8Md-R6WbhR zGm4y9Ec_L$m<2YdKV(z>2!Wi+ItFvp!J+mB?#iQ)o2-+_L>-<(55`2C73Q(ANf?HJx+TDFoOQv~fCEY$`X1FBmc*o=g_W4B zX6QT?f(9*7vDp2T7Fnq89e_y4byFZ`oh9S`cs$&PMTHipf@_kOF) zV1pMVb16+FypbD}lG!Q!-g<^^hpYPL^EQqTtV-kS$Tl8@^b}qe$YnN?n-G6(0m(>gbN7=%pY9Cz#{+zqlQI-gXRK@`P76s28=EI_u!Uox2w#7_JJ z@dmL2p==EDYY^h79c6)Zge?$q^GH4HfwUpUM4bB}arh7fkd+Jf~r+T%xaixrHvW)8{G zDQ-3NEfx$-cYdZI#S0izckxjS^>B;5Z0@G4%p^ekG9IZV)`|`GXc>G3prH*Dcz#4S zMfR;RBm5|8T`1T98OX=-vI+SZPHinGyNBhrz++Q1CW@B7BX|@J&>S>OfHoAL*j{pmk%B#jpYsv|2vsq6^jvd>q0PrV zx3u6uUZ#h!BjW&AQixA6L#$x-z{xOlXiyZ*#q5ZvM0gf?zjYQj0|5Z+@ko~-qrAjpAJVJ=M)Y_;Cqda^ZPHYN(!vN#{C znILWtw&3}g7Ha}6<)@}uV&1S1p!8ZtR<$=5GX?kB%?w* z3qKhM!7&ZAOM`hn7j?lX#s|{9Pq8)9BpE*$y4K9iAk$ED%&^ z+D@sCZi6+~3uY)D=>Eik0a^t~Ku3$d;-=xMz(PB48d1=qplvfIp1~Oaan zD`fjH{{=D&&|>rh(FR%+Wzr7zLLsT?~8$T|7;=$7_-i_oH{5b{! 
ze&n6ZxzlHw+95fHKr6pKi!L7>RCl8sJxRXCNkD=?yLGw6*uP`&Sq7iy(~mGFOabZl zwms@wd5c~QbVk(fD~lyRh6fFFMXDX=mFBMkFO}g$g~yX=qW+hepE!xWNOSs>)hxXs z$+@ce&C7k{UFS@$xbdY1{Xqo$6EL*D4=6Pa%qqzA~bQR zU?RoyGMqQsVW>eqyA-|wI1j-flrys7Z#V1#Y0nMX=Tgd*rygjRhO$Q7W(v;29w$yu z_&g;X!{-s)bh`0m>6p>;xzn>xmQ; z*~1eZn3f|z%0VO&I}s6yL`DvKHZ^RAg)a($h}7hF{)>h*!t5xARbq;Ka|2ndCu&k5 z)PaJOB7rp}6%QlJK~6cADPo=&TQ1V36u!=tQ?>fIvAQwqr47GIg4>0vdkyC#XYMHdY5fj`R{X)9V7#Bk=b|HUR z2%j*V2x>EK3JKv8fV*^UxQO%Nc~G74t1N3Y-+S!DIg!LZ@-i0#TdzOPqoWEN$TN3BE=x zT2_CNm^ig|+hSOy%Y$>(Y6=4^AnCdtEH2I} z-@J0EcfLA-k=}^PPhyeKecz~g@+7G3pI{q)lEF_g_-O`jGWZ*Q0jX(4YFU5F3C2z` zXf?ChRz=&kzra#=*cb43kVy7}rb)(=$^PWs@^l&xI{w&U{BsBtIs}U`VVUW~K9L0x zVn-n~jJyJ@m5pM+;y59PKGcdcftl2CE&izerbvyRKYQlv<@+<8Su#PMG0C#pCPG!K zF62}}E@4X~Cq!3t-dS-daz+NIh^8-I#@V_!jjEXrfj(>*?99|PNb_g%U}~H1ieo~m z`Wq!2Elr6z20$&2Xjxqbgkb`m2ur1ulha%xjX%Z-Q2BG-i@*(C*NP5y!H z35%B<5JK=Cj&H$2;t)Jo#hCN)g+sXFtVN`NwBtrR#K1JNm$4+`BuS70jUW0tMydkp zkg7ZBhgH6;%9t4)3mD@r3s}gV7I-H|A28Y zeaD+OApnDZJ6eH{COD&mpB~zGd9X&s%mNK0#_WbZ1A7i_hvD1=JMIjFtw_*J*;BlI zfVYB1g@dCxiMqnzNj!yB?`0QhlB0bA3&E7lf!=XI<_&=yEJW5#Fb%8W&n?rgovBx zKLCPQIM<3WCG5e552zT<8Du-}f*q=)+$6Sv#v#;>z~-ZiOdv|)@sf~ExM^)U2YWlX zPxNBdAA}7rW+k>Mc7Y+oi34CTni^(^n)L&`{SX55@3%89 z`@ddd>^g&&8GHwWR}esBH_AA>Px~wt1z(wIHyM1Cf!LJ4#@Hm-b3iT8;h~AABDJY2GIod!4}>45XoKUfb{sDHeG8)8uv&*X3*= zI|}y;>cgBC+wf*6L2Vx)$WGDqQU6DQ0ykyJ|KF(#_~y|RcS49&=SjVT^9 z3(vD-IHe}ro==VzHDNG;T-t^15yp-(^L}L3J+{EKuk!X`1UMHD+aO#xdc&P}sRW}2Q1oS*1?UNnQnevOXJcm-_d2v&$9b6Hy2-49`3x;^Ro&lkZ zILGq4bqQp!6^;}f;e(9~HZh>WMITDIg|Q5Stqis?xSPRt2KO-7!Qfs7I}!MOx@3** zB(9glWe(gQG20(Act3AH!WW`|VmMV(dnJ~6{}?m;Ap=f+-{s)A*27Eu^5Ixtn@@1EtI1#`XDH{oNU)=I{!IY zEzt|KEM(5?wx!cVuzvh-xoR(#3gRl>`8evcz{srpr&363&QqYOX+h0UoXVQJ&EsY& zY+*I=pNG&5L$}T|26-5Ad?el#TD{?hDSwqb&_9swN$*P+(oe^uy&nwm|B2pY@3z5_ P_<^)Lcz4H}fA{|bgrKPM literal 0 HcmV?d00001 diff --git a/utils/__pycache__/downloads.cpython-38.pyc b/utils/__pycache__/downloads.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0e3b5125abd03c5ddd432bc977077d21a7cf2b4f GIT binary patch literal 5324 zcma)A-*4N-9p{l0Dao?z*iK@*P0F}g6Lq!Kq;1-@`PDRb+AVbw$5}FAg`nstSu#Z` zcT{2%)E?%(25TROB7?jY0}AY6*k7@y{sDX)u%R!*u&2QouzkKq$#Ihnhy?PEcgMT$ zeSdsD-{YO}@w|rLg{f=xlgBjeFZ6KmXW-#7F8O=hxK`0P)u#pOpPm43jAemar<*t8Ry4%j^|M;^5eX~ z$5ET$ukZ;ziP|K0_%xqE?GQh~kMdd6ruYJ%=P#i)&Gp+_>D6d5e>HgIdqIsmy|CrU z{CALETP?A~yjcr3P}-GNcctaI>vf&FhCkYE4%bJxWE;0=CDdYVhxN6Pwe^7!YrCwk zhi0s|Gq|j#zQg3}q5Vr1GjRoDvV$Cr8RU1d+fCiqxqer>t(jVAQ*V2$p>O5|XBz!~ zrg7u8b`N@m)aL4Uf5wXIE2TnW$WSC!Pk3JIev;|dgmjZE9@Cmp^Ss28^$oYYJV`*?l*rW#c;5(C{y26NRn z6HB195&fn4ftCss>VvD7qawuqai>nzI(Y=#|YN0>N*mPD&F>KXpXFeWZ# zjQ3CpwKm)?*1po7K&{+(tPgZmFxSkhfdSPSny*=g#TDW8-dD*+)6+zvVGASsfRD)jF?S+ZOU8Ow3gDOq; zVGUZ7U9T0Eba4nTCrm1>rMJV>QcTyZLhrH_xyR8aX+xSm&%_Cod#`PTVOPFiKB&s2 zdeAAiI<=-N%lCV7soiZpr-P& zJgN$pw}e{{WmK#Oe(3t)B6idaH@^E1ex*#39XgIo%ni536QkwBiKyxdnHbp2XvY{S zyMYh4BX>El@i7>BwCj`T8t%Ht#1HZGH(dDr<5HcrG5TduDd1AuT7fwfFlZgt&oB*} zQbEWOfM03cj1BmJ*|(x^qkC~?VDoIu(2mf}w#-=Iba3Z#L*e}^#&F>;)ClRWDXIkH8TC_HNV}R1yMewS1 zGqH^o3$fn))AuB>l>~SZ%l5QfE?ymL@!oF|+!oCL>@b!BdwWMo&!34(c z%cbG@+o=}lTV9I}jj++>P6H6=oQh7L za(LiM#}7is-D=6u34}vF>$IeE-s!ab>izA|l@}s=d2Q{++WStz^5Q7W6%|Iawfv?N znNAouk+HP21dq!ilA7=l8BOHZ+{IcLx}9#Q#ue&t5ohsLXYo3gw@x{W0m?~~i^2`X zw)2*=_>r?%noh*JhybVZqO{lAJIJFu8OZe z9E>kS14z~6na^3@40T{B;)T2qND4|O5b2X=!t1|a@@tH+iRRLAyTILczMmPsHO8~M zO#E^9lp!MD)%zB_vcSiH%4|*pm8xhSj3fSj6B~mf;)lF=Tp_#iV!wz;)MASl`9vD6 zK=Mg`=yQ!vT_q&2qp28JAL~2%&-H$`ZzE#kG-44g*He0pCTYwL+aIyMKLP+l|7amR z)SjY*0_8M6{MZ=GVBZCh_Yp{%#jNxD?_5sbiLqW9u?O$5);`=m!e{V5`dFtpjO|Se zvV6^F`#IVTpHnC>A7`=Gm*NZ}7W-3}@UrSTrh1OYIrP~4mB%J9OxXeuej<%xu?Pz$ zmRCb8hRpOZ7nrhA>vB$m2kU(EbryZ!u8ym1a(!ic%$mq zI&Ph~b@UsNx{{M2E@%pT2nxLW#BUnnInU-F zj_jcYA{`F~QinZuDY90Ap3j}A;HZsy+fFTXnzW+U{?}#f$*h_PDBjHq%FjPu>#ia9jO}`K9roploE6OeI^w&PC>8-IKUWx0zTe1c3y) zVQN4_kbMBu3ZG@K(P%wuOmQ6}D!7!M$coC6C;z{mo|>$Z@ny%`ifky|fzz!cKbfTk zEg2$b5rR?>c#)0l8R?EpvWhvX!DjUF$ep=pJ#?3vLD2ME<(S8lB@ 
z-*NBVy}k12(VJdRx@X&0e)Q>?TdS2@*B-5U(e!Ah13?i@KMjWwy2D)M=qwmjN)XK( z4sqS6kC?F`f{yb)<`N%c6QEb{GR#S=qbw<|(B7De(b7?IhT8Wja>#8sOSHOl9_V_t zx+~7o7=j8xR>$$cf6xn%w0}ZRd620H*KI;hN z#*#KqESa}wSzbTFir~m1&J?qHk(ox3>1+xuIxe~sgC5Z*5rHP2beueNHv4bO5)E4W z#F=yJt(%QX``wMOv%bCIhmY!&?d9bU-S&+;TfOyjojp73beE!5_j|xgN+fM6UcCaS z1I%g!sv*J#OvOR~F-jxEnMHvw&MF8c^9jx=+)`Y@R4#f2XBV(8xWiD-Ybe!om?7@v z_NFfG^DK^(IqiccsBCe_=A zo@Bt@m@UrWBxMcP{&uuBICu&)@=wS{`SM>!GGaH=JCtwYe1!u?p3iLRV7rrOKRntO zGH=Qb3g{o_M;_DY;SMqCXsV}JpVj#6S0Y&xk&_}Dby65*iQ)P@vG2Bg48&<>_0IJuSLW`+vfsmb z@KTzV)}5tty1uh0DMej);T0F0i_U18>-V3|^FlM=K-Or94g^=?XBH?e#yL&p%1RJ= zF1@fgdu%}^|Ix%qi;hZ6Nhk2*XuRj6=t8xmB4XVxzK%rJ)xdY1(?IYujYyZJQZX^Z z7U-DB8=oTtPJ}6pyh9CH;7ZsCL`Pi0qqs;npE`0lD2lUc`?G+C0@|lomd@c@cylj1viEb$NNKYQ?G;qgZ$ZC$o3a&f<7vf>`lf~Bnm%9% zQGE`~#c-6N?Xg?rIN)p5d)tT6) zVkWVxP;R|i6(3NaGUo%TDaa3~MtDbb8gAhGRO%Hr97*v6897az-lZCS*HPa()X_gB zkOsAp-j%oq46HsQs81)^^PPbAJoh5GoH`Ae=E!_h!hmFMBR`h6@IRVAlRx!enr%;6 literal 0 HcmV?d00001 diff --git a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47f697eb15483c9ae275a62e480bd2bf9268a66f GIT binary patch literal 37920 zcmc(|d0<@Ec^^9W&OR6{1R)S4L25)&6d{oSNv))5i4-@X%aVy9c61B$8p>wZEo^f*NqqSHF2Ay>3e*y zb$`F_+}S`<_RBvH(Ip6uV^WDd~yAuKYy}AEr?iarp2>d5LwExBMupd81 zhXVl>NC%Xq?4p&n@&y-M!G#s1Rj^lG_}6bDK-rEik^XmPN# zCcQ@PW5uD;+Vonvj~CaK)~DCYeWEyA+K}EL_jYk(dZW~Lb2=s0Thg24x;ec?uD7Od zmFw2@R=M7mz75x|;H70`P482y*9Ro^oAdWe?0~w7&x;Qr$KZ^e-tWh+SrLCg4W%E<-#UFztySyPdNurV z5ba9I^9HqX`cT1AH@|G7?Wz2x>BG}UGDq_p7avmAl7)2l=ijtow)b211=Js?t?IUmR{9b3bv2@H$K5e?huVHIkbYF{P@}kx=Z~wg zPgrWF+I2CMKB4|l-Kp+Ey0K7ROIG@{U-NxZ^DpI3 zPd|p=;}iCwz|%wNC-P6`Pv@VQew*5(_FfDupntE}XJ1v{P;XZEqqKBBy%0#h{b)cv zp!Q!3sQr0s`l*Fr`f0=*P!A&JK|kglh&iYZA?A=DlTnA&5w!am^+);j`Rv+&I*Qgm zq#nK)PM=ZVRBus_poX0KWA&&S$DLBg)d{4}t8b~3>J(xo@;fIiby_`!`$@TfTs?vN zDfOg!8&VcjTD=|D>HJ+t_mp}X?`HBN3xRwlf7gPg{*8J@WiMI__?IrKKT$cQkfNmi 
zR86WW+?6HmU8=A?ppL1V)U6XCHLYftBVAE{rb?=eR8>_`Rn&4;{afWI7cpA>J9SQ- z$DO18TwPFexO4Lls@)icntCUmW|gPvY94pz)U)bc$bDY@h59M=9AYl0cNYRkxu7oM zX-++_-h=lw^_S|BdJ!@2RPR;qLrh)0q+Z5#Ui}~HgX%+g^Q`)5^LW`QO8l7mIO3jDud3IOYC(NMUB>mI`lR|OZQ_sL$f=lKPJNoccWOUQ}1qKSlcYs{f?Epne`P?^A!JzNmfycQ2{$s(+^b zIqqIozo@>1RPR^6q<$IK52){{UsGSkn^)BL)vv4Hz}*M)?^3_HU}0>&qP~jf52@c$ zuOsbGs~@P}R=deg2 zB$kPu$44wLrtdOH3#B~nBRXHrXIsDu(Dt5f;7AoZ|(?rvS>XAY*KVp0F(|Ydkg5D>9RL z*D2JPEPAGLAycT#deMo3Th2SqvW50}p_2#3A6fnp{+79gq*uS~__5>X?#>)KaQeWh z!>3PW4n2GlJ;=IQ2Sug({nrN`JAM4&@l&S{96NT}3%PnWpYHKrJ%0G)gU3%Dz8XKE z3)$kR>rNE*qpD@>OQ^0`DNl|By?C)u&Yv$Rcgl-bbv{>c&@(Su)s+eK%1cZXD_J)) zQz)xi?45ge?Hav%a()C@jAg_pD!P<)-Mn7ro|s-e{@A03pFD9g^XP#mGp8RqdHBGg zQ(i=6_4z{Cvv#F>k3V`MgEY-|fgHQu_~Sj>b`U>j3j#M#4=h;o0oSSr7VV|LVvu_Y zu{UEDtQn4XaNG-Lom`>dMGBMUiq4OOyrAQ1FIdc%F%ackMdi_oJg-+Q&vLE=^ax77 z`taCPrIa6=tY#fITO1qBWiifUZbj#&#-?V^jLjBZOrklrkaIF~m0|^hQ9{zZ9+P=B zHfd(rXm!qu6`YJTVjtTcLJ+WHmc9em|DHtfS6}|}cYozG-~0HNf3WZy*Yr7p1k&dL z+_>oNB4`9R$R4t<=%YP88^^o1|JAR)d&lxuzVbA>7&?PF>e(7ybMjdzxR5ylwyEJ826C6s-tMTQd=9K{|f$_^|^IW{#eC z43pTdI9@bgK3C9{a$9{;vs6lmNx<>o&#y!?F@P#|8%N;%8P7606;Hka#Iom`J%(`4pCH4%j56XUk5i zP<9F`pUS3GB{!4Tsa&P(W-$fyXsv5()I?(VPS;kAG2fVzFHVf*CUs@D+A%*lA2>h` ze}f3>fv0=sEfuI^UObK_oyA~ret>S3n#Kj3oFSt-fn-?ais=~N$AxpDs_%b-g>tck z(Er*nKyJzy-B+ySvPI|q(Pq*k<0E0sg@P4*q2RcV7phdTzJuBFoEHU_)4HNDG$CTG zYiewk{8rZ3gCJlht&n9~3F~&N$I=fV=15K!24CWjo#vX8@q7nv>Xy^*1{Q&MFeib8 zPGJwN=WHd42 zBjNE{)Gbv<-3zW~oj-kKBr4O?i)3>gRWI~J=J+E50sSVl#*56B0o)n=W;_6m2US4> z{pW%B?nlf@2qXc4^Sa>XvuBF=r;xxQt_j#7bUq0%Bmsz+6(J~&Aie_daG+s2d6_IC;R4Y-Y^9UEa>VYL|K3oqkChH;QMqKw&U_MfhEcW2OS0R@1c=l5uZC^dYIAx%x`_MAI z3c>vxM$PpokF~B_x0L|F>|}n#8Cj*dhBXn97cEvMCxJS=kgil*FE*7eD@-28>(ZeRGhqLO`&Po@{AWQN})kC*o*qjTsg#D zs3PO{Xd2y<)yip}#HjXJLl|L>aUVj>8IX>cJg-sxEP1Rs}xOn>fR+?P1r zxgTAz#<{>J%DM}0xTjuN*_znCVzzWfW$&Lyioe4zpwA%ix*lt7s@^IRDv#ftIe2{h z$iqiT;B-tpPP*pAQ+SZ&hlwz6*Fk}}2syFJ`$!cipwv$UWjY0y0`<^h$PF(>z;%U} zY%I{A8?6KRatf9sK*W&+^oH^E;B-tyRqSO>FD%iK_;{`NK(%@Zgx1)}O16XvQj22a 
zC{Gk7YwM2{&giV38$*g>A(tf?m^w9EtyZ*)jpb=}F?_60p1t5j4hh|(Q;4gD#;~?e z13$$Cdu1v!BXP40aeFa{EktTW-^C(g)ne8qafQu&4ira8R;3p%&0v`W5guY9Zoipx zSr9rhC99xSy6$e?jwDt``>a%nRjaRn{X`%bv>?KxGI49^x*hT_j!! zgIFe0C>PvJhWN|SXTWx~KEHLNjn~(#foAKtX@Ck1NUPAwMyqUKU2qXz7&N4IuUPuR zb*hhNGWjwH$4us%Y<}79>LNWZ4*c>7{{y1d&@pHJAn=3|e)F*5q{A{cnvtB~}dI_t9 z>XLPn#JcJBqNmXnFr0!uP6OL^v2TdTl$=R#jRTaUv8iPmv3b%zPIGZ_+H&?Zr|(8Q zfSm&R#-g(7HkTIY7obGkg&3@xIx1E(`fy3KtGMiV|z)z5TX9``;ZPu zmli1*EtC~G%pq(3|mFqM;2p*feAsVl@)h#zX6VuyBz7q5!A=7jr zuuhLeS{=|IL^JeHGkA%?%LuOcC)o+n$q zp$~$&^U8~2)mMt=AV-pUo{mCz!%cl97zvDD$FH^5tzFr!*2*KX*ZAuo{75Ajo(CL{ zPivda*f7yvI5)-04Pai|sUuOB-kzzLXg`j5KHRys!jX$BxUkE_vi9O+B z%ne)uT}?p%R#$MmBO9j*i(SoZ+Yn4*Sw5YbhhWCFE)k!D1lFNCWhUUE9Nc}-J`1iA zq^6z8fH91Lkrw*Mi-J#quugjkAyu671$XL-t?xyto;3j~J_ezIn=R*jbV=ZOiG4+I z+Yp;n@Bi0q19!g&W^fOL9kunPygOADJR5VgxJB2)I$r_VifYlm>XZR7843(&W9j=*$9Tc3k>@u!?2r_ z)5-bJ+X7`9%)O;QRu8=vP@&6q8T2r?lF!#8t>-m?`Iy@c7Phw@gP;H`tJ(E01se4) zS!*zCJ`)@39D8;+grAH(_wNG7e())Ko(vF1ADlGCyKeuqjCgC>UkUnyI^$XAG&f%J z&L2=!$e98C7T)*+8UctQg4tNr;B_+`gSIhkX3#0*sM&i{SMBYoj(OT>tHUCh3N0OK zv=jh=9OSjY*9LJII|NIg$rKdaT3o$c6cE&XkO&l@uNo%pY%dfd<>#b&|_F^q3KGY3})|h zC^j8;rYh$%6j3#9XUjRA#Ve88cvc1iEJUS)6WH^)-VI!249)Phpzw9P_L=;IH2~JW z-x`D{XIR+%EtdWY-e0r#aG(w{;{^l;wR8SK6eFT7xesFH$gFgO)8UT$h>6F%cikx7 zLcFtJ<36|;H#CC+Q8}HM?#d)p(Cwb?QK5YS%zmsic?v_ z?_E3h+_U$-^qPZ@J$UTEsmv3{Pd;+$#DRl{eYrro=fvAiKXiQjp#!HLI(7K)p^-ia zU?G~RlmPmCUU_!8>T?jKY#Ee&hOxp}c)e{g86ZNh+o*CNVG%t?S0>}%f-x8dqnuHY z@MtjozMM}H_2nvITbq=y8=Ux|2wZz6? 
zoPr$ourrmdl4#;?r~fH}D-d9*GZ3O&$Q5T*UjHFqNS)*bq|PL6BKQ{z3W0p!hYx?v}-pL&3T~XA3#+$}g_$Ro(E>LCR8cvRKXlzo3guq}J zfywp9o#R3HonjqZ5y-f2^wBd`KIJXb!I`+(PNoBG8w#W@QI(&_QWr)x!;7EDlB2>H zc;S4h>dsvkeG`U%%jyih5COda_-z3S#sZVlZn(*Sr8E_hgUdqC0wrX%QKw^oYwO=c z;hG!7%Ei^UvW_3J8SFlnnD%h>tkG-0%LD@6?YMz}9~&69E$cEw`YQM`wp`%6ZIFOk z6mn08ROn?W094rEvgFw9w~rN}8oYh9>YfG_69?%(3k(A~-SUDn`8l0Ju_IwGG+C^i z@q(r71%odti1uP=a*n(vv?vLq%p*ykj|rOdK|$P~fx73vqZGf31D5^{ zp04R%Q;xa`Zw&>7kRkx!=EV^7=iwy?mDtZ|sF)0qAJv>XUa(v_?{!b;N{NjGiCC)o z5=-1eZjMIC8!x1?a}E|6d!?UcumbbNxbZ7G$juBhzU!x#0I41V^RcpscOq#F%iVYzVt-s6QdkDPH9CW6Tr$Q?Rp(&c( zXtm2ED_R|wRzvfi$4=R3HURzCNPFXEkK?UEGaE{5U}ow9H{l9ZHl%jY8=b;5Bk=VR zH!K2GK1ZnSFo@Tr;WYEN%|ZZ}=$bk?N)CT11f;wO9EvJ6=tR~!ZLdJRKu=K!PeoZq zs6gQ3W@w+F7xATn1V>@Jb-^Z4mi8n_4-*8#-1)sEC|n#I8j_ukc^tU!rVH zSv7QYK?rTA^>n>eIjDQwJjLu~>`hP$7|ooe{~k$K0JLdPLKR*f=6F!2Ar(yqsEQLc z+&|#CW1u^_bP{iC_n4_g1tbUA8FcOEU^k$!POu(=mqj-JvdsyG{XFu#y%rso?uOCb zW6kbDne2BrHhwiUI$3aQQ3R&R(d@L9Oxei5hJCGB2se~^5TcZSO#yf@e9savG9^;1@;S5KP(Re zP6GJ?wheN36LppU6HizB+w3&Pe4SRr7;1Eozg|LZ?FKa8eqK-ND@MxHBb+&0RjSuL(qSYOKhYqHarcHt_xL?imL>`9s}6Buyurm zph~Lli?Mo`>v}%o#_Lc(>5taK&}b$Um`)lmt_{pb%fqU7o~88Ffy~q@X;;XU2r@mY z`WcUHDXvzl0h%Nt?xt3ai(Sy0#@r-g2kSAEutv%ms#~~UdpT-Sq7CtST&6)|5N=}o)aJ|adjs>K zav#!ek(gUA2N^@=*JyNQHEkWQt!@})U}#}uLxYU5TL1PGlwGOWDsiln0%e@4#Y}>| zDg8e%X0=P4rrCTVJ3wqm)VoS6EzVS5uFnHP6)go=IUEJj23rKr)yG$!Ah-#h)0X zwZ7RZAEg*_+S$vys|<)lHR%VSDClcl-9$cmpTt7*oxecc zoi0>G^Pzu@S-y;5q)TIUp|?=@LNDb7XLS*jn=>hm##~{6FoJ>s9EQ-LQ4l9qIw`g* z89$d*_Cf@VnFH8zeEPp{<*;w%tRPwpgp8to*c!Cgfv1mI1K8|_fbs^cE#L|_THC=P zYVI0W!*`C}JG!%Dz9JeE%isCP9U6l&3YN(sf|w0iW^W6@;^f2}1bF2X6ioUtr1e5m zS*@T$`CUASWZ&zlN|FYf40<+7?zn_)dnL9uT{7t(WOulBAle&}rtTcYzpF8y-A?7$ z$i^SHY7Wgo{G2iZVP?_fIv87`OzvNIzMRI8Tb@LX?&Z&sp`bo<`g>6f(hjl+unmt=l48~T-3J?h)^)MmBc`NL!L1&Fj*CC~5x%8hnaB}?N@uMk! 
zVbzAYsC549EOZQtE6Oaj?Wr?ZXtgzb>&w83b2J5md1EoJz*)(v;6^C(+>4&i>M}6i zNX(1+88l7-1Y|O#Bfz(1NK$Y{AfO*dCeCq(NQl^NW#2euj;la?U~NP(6|Ap<5IL0%-7Gdk z5al}iDVmijK$akU8GAPen?l_WY@CnC4orn>%#AH#?_IKF9|k$pu+u8{N!baM{cJrj z38PaLo5yZBAES-l(+|zZYdy%@RmWaUy7BCLK{dvvllXN{kb3;4A*ZmHCSvgizJ?{!zf;MsB@ ztX6$OgCLjN5Bh*q0tDUMt5v^R4Z36Xk`XBnxC0B25u=XrdbkY8aW9wDqzi89sBKVo z^EL8*2ynO6+6C_HYc3~2jj)BXSRfL=5BpTBNXCD1i0jUJz53)~iKTgPoO3VVMtmH4syvwl>9`eFD3Uf{a=08z*4l zkyqE}*=1WM+LUd1qi}`Lr_4k}W(8aT?vR9x-L_Ht$POY{h>Hso1?bMXSV4rgBsI<5 z`YBFn;wtXe(0Ob%SgyV0ZL7AmW2cZsw#%#5rkcGeC0lrHbz@U(%WpQp&28N#IReoN zshtC}Zsjqdi$+IBYs0O_lu$ZY{o9%(PO@64HdcGeP&Xo0#{OM}>=JBut=r#ARjLZ3 zP|r%t67@Fm%9~D-z1N_ru-SuV;C8I@c#!=XGS?0|4O_9Boyn*26BA&0X2Vj_<(E<; zYs_X$yieofrdo+$)UPtw%RrFPn|Oy4MuBv{!JloAaYU3#ou^EbbnoLvY=qD9h}uUSnl~cv|A8w1b+S3&YM$s>D+@d0zoo; zw9Xqw$W1K; z=+o9ISL8iV9AYnAu%UGX!&4rH;VPMu6=GyTkkGEx12AZK#tlO(fqSBlOWbS2f^q~o ziPhY7c2I&^mS+k#vOFP|kj)sPoK_Rs7iLg@8)?0s5~dZz1LrVzVA*8|Gg+cG3_6OH zif-G1nk%(VaI*J8S8p=#E|exka&WyhI=c>uS8T02b|A+N${NV~mS#Z(gY_+8(<@Yq zSPv5^$nH{{bg-_j^=vomi7OSW0X7L+IN*<mO{GbvW;(H( zNn%vvi~L21DzXS>+VpgG;~ZZ20+0mgL(qmYY4kma&!N`lAD)HT6^iQpc&MGA*)A=y zXqFBp*2dFDJVAj>slj3h8XJx>gijQWASMdiNuYAvft78Kf!G986E=dl(T&dIZEc;K zSnN_!&XLPvEbs&n|EFNzMB&wZ;91BR+BO1MS&rq-?H(L8CCq}sFT!{|LoTGCMxEOTYZ-Raq7M9j2zf{ACUqXj^YCmueQdbkA4I@;(4AJ`LEaLzf2BO^(%EjPICPSgey&2zgU z{*r#{*BKLr*p0_2&SwkmBl$UR6+|AcuFM@M7JbuPgQQwB$kIDFL#|JNo1&lNOc_M+ z6qoeiU%wz2+Fk*jSo+O)e>GvmB@V>aSFBnPmkv}yj82>_lZa$_9ZUfP;nP4VID!L| z0=WWcC6RFwxlF+RUEf|0lz~zvL1R6AAC&ZPP|||91Q|-a>ZeT1c^vOn z%f>fgsBEbZ#$Rhur_9ouU<_Z2P_DIitQPgnviYKWW1)gURn!6TRnz&qw`%8};|mXU9S& z9n)OM&)|UzS(Yy*`!r89VEIrN<~)TXMsfuL#TzaeBHcwOlsI9Hd;}IsW(h{7qtmf^ z5CUi{th%Mb6d}trhE2xJ4dHB?piv~WS%~mmXG}jtxKn_k-+}=0%~(}eCLt7XMiK_~ zCR%4X%m4}4n?g0Q)6j2a2`3p4fAcWo1Qc=lZHy5VNHDJeZWB`?g(z*hQuE^Nn49OYNMgFQ`OBL>%QJVSX={Q+#sR;ivPtNH`d zcQ1m*U+iC^$iQ}2U!sO+U@7n-&>!0H(mYzQxQf$K( zflr2uF*PWXK4Zxjh2SOfN>FcAYmk$(KjJ2(eP63bKwd;hUSK^$+%;+lx$&OQK<7bc zt2?x~R=hSKr->gFkw`rT{|m{w^@{BbxbW#9$bS9edJrb@dJO#^u^4K;60!V 
z+r8=G`Gl0ZL6j;RMO<>Tzos@!Z(O{&9;+wJw&sSBJ_S4fkV?@%gbP&(Vm3j=dkaq) zp+r0{ZYE7l5lQ(zse6;u{kqh$S!&s0L@Dl;#aru`Wk}iMZiR^G)=Pl`#%jJxZFO&R zrTz8z5NL7{+1z$HIG?O{scjJb1a(4<)RUNFx4YZa9cUTxL#Uik+a-1f(#E95QPyba zc6Y?Ry)na3!d^87+s2*f%`W!l61KgO-Qzli#RX+y4RUj=3MfMb`!~)eg=fon!&BEq zn2PYY4ut1XR}R-ZwaD#4F=2}d3i|WsOBH3CQ?Ykl@8-ZxY7Z$ zV5w`T8g;?9pcVWBnv5vQ2rWdS`~UJ$sIw;H`2*C^Vmxm#6kTJhZ5?H8%7(6K6Olc6 z1F_~ZvSut$P>R>enk_jXCh{s+8a=`ew;HntW&EFw#=g(VIdDySuB^yOdMn=_1*iIcju^GGCT4@XKs&^~)i zyBx@<35rfI$o3~w=z0w#7=FkGHUx~Z!0b(kyG{!)zXatBXv~#+u&6I-?ds?$ zweXp&GX)*$NuR{4g;F3cVf6z3;i|Q*W;-L8*{C=$KAS}7%!0|n(Q#HRyy_alvbD@8gDMC0Tjst&MOr|2B0MSV=&Mx4XYgYLzH8Sh{EFna5Krf!h zsUP_M0Fk%_g~)30x_D0T49-o=B3cMLnV2#*q%-w57_^8K5ee29_ZWj&1}&PU$j3MW zFM$T)^c=;5`XFO>F__`2Q2s(bhsIMNX{4bfPsAa_NVlw8FPVjGw*fw{%P)&oF~$lq z%fdPrr(soHzF1;#2g__4T46{9AyoK^*r{Rd>M#G_M{4I8#0NqoNVo9NXAl-N!5B0( z{SYPf1qVT_CPB9H7*QW$L9p5zgX_TZrtqGJL}_)yv&1B2`-Sw1AhsW7t361+9Wfg% zT|-SBE1G0jlN9?)JW#V!43t0?l|V3A#^{s>YGNSKf(l!xNDjnMo`e|5km;z3z(jRF z58AjOyw-!H!Y=`0m}kf1;3S`drla3YK(sZXy2>%PYcZ*k*g7M%`|7saO%fWG7q;_Z zaG{p!2AN`;XSWAJFzPu-ko7bKSgKdVT&p1F`ZJ^7QGKEh>6dh(eKDIfJTtV+X!^%x zZNao4PJ#U?+Mas-hp-doT;Yx$G^WHI)G9Zo=H~PkJb*?LFu{Zb6dqB_G$>OTi2zJ> zTMqHH;=uAw+W--rW;_HNY&_FwMMYl5xvi1t_z~Ex${$Wr~vD}KVnKwdoPB$4PCsGXA0p2J-blyEd0-U)*0wb-pM)G+=DQyOI2E0 zOQtU1Oy6dQr_gEGRJ+;ik4V;%$+qXIg%3P>0&F3)s=4YcH11XBh*>ZYnnAec)S_o# zB{5x{tZls!zhV-O`RPW2%kM@*^!wOS5(PD1?p21sYjzd7DNS;~kb=)q#Db@5yBcM~ z8lhpR(#96~hDKu`NoMoT*u8sp-Z#6iRg&S`NvUl2#hN5sI2phW7jmgb3l~t)iH5(O z(bOr}yo{{&x(%~UrVF--2DWuJTdW((y!&$w3ka)o1sv3Q7aj~o6J+)ee0-e=LNK3j zG`B2+Cs!c1VUz^>$e&_?QvM6PgHtvf+hefAjL+c?UJ>%eDj8`r#_!=02obTI+iKv5+Z+T=6$#^&4k~NEx=qbq^8CiY=;=Ck|lvx5G+Kp*)46e z&v5Jg$ln13md@=_XpC*R5mW6{<0K90LLEkxLk1pN-S*Qz=&?veS%RTkKhHbqU#pWp zhq&$RB$Ls;9acyiWLxF7-&&OWn&*CwfYXmVkbh@j%Itt-TnSe-3#9QS6o(cF126l~ zbs+5T->hh)1!uS(_!&UH0}^-^YD?g}6Az*}5*CmCfgqKHNeCEupgNwdB@<4WC68{Y z5M^`psA5AR7lg=6(=fr{rkm@+`f?uMCf0lgh3;+^Ni&!cssw7l{eH5Y&gbmSC_>xU4N*;B5`m^&8wdj{FVW&q2pHUI8n9=#L%MpZ? 
zByz>2goc0w)(7+k1M3KEhUJa0x`}FBoJ}I{8R_F1$qPC2bT1ilk(+OkRBI*G zdnDDmYg1jK;N`>UDP-lo;BNi+=@m?Yu;OXkIi*t=y;_{ycWTGx)Q(#4SZY%(vMIHr zf@>eHF#U5O3*JLx-yO0-gkGQBgp9tm(V=!>G`N86FbO~&ZfL^#i`~ZT8fJj<|A-iC zh?!m^ACSV+m_U#gz_tKl?#Ws$@fhU;V$Uh>gl*BUA{D$KWp(PpL<+VFJ9dB=2F(Q1 z&Xj(QFJtCqEd@tDg5^Y3EbFzo!-49~mf?-3NsO-~;JNu39T3;1p5CW%CGppw zQ!Z~=&ZNXiUA`Y^Qcil920w#nV7$eS9*EnXcOa|I;dO0Yqq0`@4NpJ52v`e$L`oKC zAxqh+7o?1c8!8UYk&@KEj{6on2vuGa;0Ahxse!IQ^k?x1BTBlvvu35GIHuhMzG*Jg zw85VQitv?p6XFyi1?+*%=D0u%3<|V(HqM=p^Xx!+gb-UQn+^*hUgo9_{5b@JZ#gML zJWWkGReA6&3#Nad0mp^`fRN6S)A=$s&6YKQ7XtAImrl8Sp$IA@i}G=}idGItB~zw3 zZ^Uq5f{o`aH(s11?wUa*pryHexS_W^?u{Uvf{zHkDgZY3MGefwQyHUp!DAX{uIpPc zPK8k!rpvm-))IiQ`1}I0%IS~q^EnXsuMaFMTsoA?K}0luVlycFqCF902QI>DVC7kO zl-XjCNE^{TVZ$HW046jC17c7pM=rs>WGH|*9IuvwWI-&mb?D>g*vDWFDtEhD(9a<9 z3M7#@#al(p+%Eifw^p#se-}B;{9lhXmdVYb*VirpGdF`8n-7R`247+zgZhiSBwP#Yn%?F#AF-s zr2Fw6|DFdQFafm_UA=)mq{kgt9f0Zr%LpL}jn{xoX6$`Ji5h2bpqxM)Z-5X5E4D{(bkMdx3jWeDOl*!Tac}_~lfV#O4 zcxy~*YOy70#!vS_g1;Y?aBqm=H)!=ljO0##10{E$TEdt61Sgo!TKj8XP%T*ku6?K` z;kQUZIk*JX@c`ZJ0zLwpYvwAqC7~ra1wS)EM8*L2)p`HE~(sv+u zR2kq(IEbIazH#3p=l7E$@rGY|1#k2`oTYA2o785tMcqno@@gAB$lq9)xEX;SW97H-aT9~h47M=1mBCg9w=p24u*?Ou%+Xq=256a+ zcA13D@)(1i40bWt&44&~`7Q=`Gq{Jry$mSsStfd2CK10(DabN~e#>uWK*7iI0}S>v zIKY5|yL^PfLku2f@D>J-FgV8GQ3m4-jw8^&&2GJwk0%+NX7Ct;#~D1q;7JB=V~|GB zhA{+fNFWn)FjvV{?%OH219y9N85Ht!oR^Ov7mC~ANrKYyDc#E_^yH~ilay{JIZvjuTeugxyNE(&C2@Boc^)(Nxt^_44iSqz6EDY=`7%*_cI`h1G}G9=Ovudw_Ils5~8e< zQTT)QQMk`C|MYE$z3~8qFaS7AC^AfdEW~Nakb+WUDN8pWVERXC8M_n!N)d)1G!kUZ zEr7S72$_NnlmfKUIV_Dl*BKN)4^dGgj~S9i9x1uAR<^e-y%~x|`s<^Y_VPQ20FF{& zU0SGS@LdY%jJ5zaZ4~cDLjQFR%+0ui@aCGqYGV0mRQ`ro=bF{>V^)R$6lvOLr2!#k<&_3~DhOAwz!o%bB^HQWm@8xY z{YUnR;xuB>E%>K*urEQz_Mkk8l#=WMP*gG~EWx=JyCpp7iP8 zV1O~^)@eBdZN~PSh=Z!NP}bi@3-w+miO>t|B#&NzbCwT;M5csEF;jrc66YA?I|D1B z17GBlaW|$Er7k{*63__Z+|peH5o&1u5it^o0wNMhu;m6qf}UU}rTI;8!ORF9wWXsB z#g%Y9MDtg68Zv)u{jyTQ&Cs15=kxO_2u(?>;&&#Y8H(&5a#1vr9;EIB_rOGBkSPH&KEos=qEm_biL1)^Os)4V- 
z@z)4FpBr`$sut*xmTcGqW8c^Z;L(hJB<52coGVbKhO+qFLdR&aBXRtM{Ue<%>>rL^ zl2eCCC>m}W%){Gj14}{QBI+G1&=z=e-oO;6iboRG`jeOqSKv37_gMsah4zKFxZByzRoArRD&F}-0T1heEhka1AF(-Nw8mctJwQ4ZxPao0 zr#KTR{D9gRA&@+%RCz*(0}M8>Mq5lB&Nv|Q3`)9Yu2$>>+>exTa6!Z=crQ2yB>}fI z=p^VHCo#9ea6f_#2x;yEs6k0I-a~!e_8xgxyvJEU&P~Z5+8}vp2nY2vwo=luI9+cz ze!)IX#!(9%vdwbb2KWHsBycWnSH|$L?)Hs^gDw34V$DQo!3z4f$3VD|ec*aP9tMcz*#AQ)pab*a zxkU$m2M?U@KK}*{bvtQA{DxVGZi;rh@lNod{|>XY{x|rz&iQ{Oyn@t%C>P(%sAe=RBd!25;$keZI!8cr zv1}pODNUjLkKi7*HrhZme0O7jzuF1#Bh3a2roI8-t^@G$i4{4o3$zXf&y-@)tpFC& z5m1^|Df(dsk1$x#?W2g>$!?o1mfhZHZ?iPxzfVX60^fY#`+xX#{{7_x%iA&Ib8!~3%D&x<9jLp>ZkAe)MM`#yi%hSPmxHGNCr$aLl_)JLX-QWU1Sjjtlo)f9?#Ts~f zf-zn_2J{Qg0VelAgm{L2W*(p7tXm8 zRG;-z$;r27s>FPc>YDGRHmBY-02X(?PxZ{Ng72s#a)llRGKbHoet4&dp^d9y9GFC! z0T&h$5GnT6V{5?Iuvco;%l4$jGUjh`(E}0!uLPg|8c2@8`8D-5_3rxMT6&>cgQFLg ze%(#hBaGo0IsLl1pC4kmcrygcrX*~;-p{2t}y8$rjpWs~C(0~K!BKqm5uV*@ZcrwD?lpngbhUo#ou}2NQ zY@_sz&C)m4H<E@4qaST(^gpEfOsg@1gJ3QE5u33W z*PwpR>LxtrZ?4~rgEZR+)AF(U&GlY*3Jb!WPOCJG8=S!op>0Di<9JWKmj{4YMtS%~ zsZ0&c5%OD8XUVVNLymU62QxF(Y;SW^>M4jT8oiKu!3^CpzsX%Yy>4-R{g(PBd~+%| zKU^O+v!Nc9*8UejAKuc~F1+i1C8VEKk^0TD;sSU|sI@b^K3j%3pj>r_Q4&Y(7T5&a zSjC&`o9mlY|5^w>=C@!}H`KQ@ThVOWlK5MxZ<*fM7$=Zo9QWp2udiYq_2K&FMWlgs z8rH0|2&qupP4z)+D+#3e6z?%FZl;eJ6K~dZeKpAYKJnESa#M@9sCD%|wf?n`8aB4a z_-t$FteO8eVY}H-`-SmJdB>w5+~K83zD~pwdhn2u+BW{^sgV?J;G{Yad-_}+wmkIc z+gZW3^7ea1 zQqYmXwn2Q2h-xJDcFN7**98td5Eh-Mr65BdzDW97$U{p1N9<9d5QTiHZFsnhBSHm* z^ZodYk&>44G!ahS<6uy2>-Lm+jS%LzDV!C6+XnYMG{Px(ol4Pj$*$3zwPd51hbxbz z3W1HVuiOba>ZI8vzWso27k%Xc9B29yQcRYZ!;YJl{Jg}#BpRPx;bC)dN6{I4QB)O5 z;`dz{DV$fjbE1}Ta4N18Hib~nEPJh;~$|+W7%8dod|=;wHr2P3*&^9S%W*+I%HjuOM20ArA%)F!rkbv zF~hf+B6O~hKkr4%c!^xVi_!}`Y(DYj8GaFyzBuq<&|n2d62UT@rBj~+Z=EGko_RJB z{TgD7CzV^%3HV8&{V4LJ6CCc03|Bgi51*P_`VXRgIlkFRSKD5fIN5+#TWCi`6G!#^ z1^VO0Ij@rGKc|RipYfHa_Jv#;jvg;$&QGN!Et-{15Ug;T3$Ui)hF3x7o}~l>heBWz z=9)(1TcL1C2OT#U&4SNy7SD)ZXfFs}o|Zb!jBtFOHBF&#&&mm11{VW4d}cH^!GOl&W3W1q3>6aswJIKSXPX+{zTX|MvvdXUMSFJ;9B9-^Ok1rC==^neTd 
z4L}8l#^vL@>DYN-k29Hq(q{psbkz4!DJvc>u&b5a)DcE z_)LNo0Q@s7 zaf}9}Hogmhsj+A+;SNMACK)y0FrUZN;Aviv+$IIYeUc((JXYhpZb*L!ZYM$*@(50` zCg5fSr)uF&71K(e!!-hD5>c8J!(i$|bo+*BMwg{=8r2$;TR-(DRiZqA=@cJ8y@LTb zWW|vo)s6OaflLPZ1>^p$An=p;_+*#rm2|O2IuLhAN#SG5ydSyXz!kav{X9-`!{r6G z{rT>C_jE4}u`UMSrVY<--{LB_AF;lC54^8WRxZ#OqX#|h1_XLpFAcZqJ!pORW$1k1 z)U{U)4RH~e=e3Aix40T#sTbIxq+yahM$Ttis2XWDNE*~y?=k5}$Td@LG<|Ugrf)*a zP$$fAhRx~j#Wf&Ule9GGJj}rJ!(c{W$gFO;Y*Vx3w`NmoB>9~a!m0Ugv%HI~tQW+f z4QJS9v?4NEba1zY<7`~v$%4Lhyp}MNZZzkdtMyLJRWtrG4>DXMuEabJIQ)^ac!R^F zIL?$HF8?}`tymm#nDlqJASlKYv#MLnw>V0(@Y@P>(k6jy7+2N8ll%ZNKDpa!tq?nK z)heF_Pvxp9!2*I0YT;XV?{v0n7pp~YWz|F{TwTQI6{ z``xhWoo6}EAbMqA&LHm3&=+IVh%X029GkgLKm2calF)$(*=(uo2qjdzeRIQ)-9xl} z!r4soNr2R0U;=T*21-%r%0zAT=G240rzyfjBzR@GGeP7r7*2IW8cNUJoYG%F^E&#; zJ~qd29Ud@-{ZJ8D?5zsC7V<6RAZ>&{p3wb(;<%RH-)JPg_L0~m)e8I-2&i5v6+_*P}hJF{P zz#Bs1+wD!eaXoJcgz!%SgH}bB{i9kWB}-#uoqu|lCyTs@q2G-$ViMzQ&USy7cRyr6 zBOU*1dHlK#49jVR1N{`gZ47f><;CEKU1fk3=&Y0;1$>1As98{Bnj$kXbJRQ*ray`R zABwN!;GCd>qqjNVPn+zWefr&io>+;IMS^pgxhhmd*mK2oedNq6UBAnzxwMatSaG_` zEcOgh5h!bX$l53nlB`j597}A)BYdP%A>Gwl8`l^B_H-`JuX41wd;{2m>HdvpaAQG| zj2o>%5PnJPc94j4iju5Ht(P>yv2#wN%HVPSXRnMB_La}p8hJN7I5f6%{D$SvJsm! zPWI}J^)R<-p7&|$ydixv(g|#NJn4gF4H*))MiUEV6{t<2A!EE-nF18%HF;Ec0K@?{ zofU5=N234r#RN9AhR)ruAMd)Lain!MY6e}q;yq$;uojY!%lhd`uATAT-plSbx8kZ>2{-Sa$+9A|(0Sk%Q>mIGp@2XZd@g@=<5kQ@dU%}^zt z4^p0l&xwJO1~h^oRIKS$b%y=obDp`9&8=5v1efabZ*E?BZ)jHfsp+amW|O)6aFJ@DCIS zy)BBvJ_uNfbH*S~T+n`>3!AS3ID~H&^$_eMwp+upOvxwSheP&Th4${T=^b2u5>3su zFVHBmQ<*Lc^u4$-G&Tj-q_MSIw;V86RH5tbxPl_bQo{FCCd8vQ&8e|q>84LUN3vjp zLEA=?m)Ah0pTd>d-3dps0)hE^&%y*4QW@Za~APnG&Ls#(-{2k~;f)&5!AMe>n z0V_8`YPyX*I_!k-g;yxw?S{2M*i6S*p``E)T^RhojtpLmOdyYp!jBq=t^1&hV?SsX zj*n`rY%or`a~s>nhDt*${TW=ZX%tC5Y1I9=sckaNB3H%F%PfIiC(BLdncRs&6XCOy z-~PaY;XgPnfOmi=fmk3mgu0MR8(%evG&G8CNYD?1P^ zlw?cZ&K`)q1}6lD0b(4fzH+^ua288X$Zm4h;v+4Xn_}!Cr&Ohl_PEzGYg~(DAZX(! 
zz4##;@uiv3RPgAn3_g!4Ry0ZPK{M}a11O&%-%BAqF5BgD1jC|OK%S4maR9>>dY$eQ zIH_>?3tnvfznDz$nWYCVjwxr7}kMaRd_GW4jz%~k&(+@G?P)496z>AqziW@@Z4v7 z>3xwcJIdg5Y^bd0X+D0H!6*XA)28TO(0oJq6jRmsP7Y?%@XWX}2r_9Lq@H$h9MCbR z3I~jLUR|LJ8wg%Uc~npn4a%22rq~S5V0`y<_6+>hF`0Y_0<(FJQf|#<-b_Kvp3J$^yxwFayi?Q$%yAEFLdPLV~2HtMgB62lwI!_?l%OT(ah0v6arPsAj$D%793mjqKycgSQO{B5ZFSx>9d7iH!_aTmQEWFUz&FGLZ^$z8@-c{ zcQUw}!43wrl-EZXJjLKRgQna{#K<~>I2xhJXKT%udCxLgnl0};XlB_VU|H~$I6|rU z?ga)PWAGUUoU!sHO!G0tKVh85;5!U{z~HYL{2haTV8Cw*$=5aHyA}F(`8di_?qNW; zxyE1R5#AkU@Dzg@1NyfRml5L6z&He$VBAgyUtqw4e#V;zkLl?5Gx!z*n!1bqwV`up zz^Z8&DHerdGbhGmVoGEPAR1SQ!e3P2BGDCroDl9p9LQECm{0#45_`#qALV?&hc%BK zKPg*^U}J9;zbA)P1p`2SK9f?=XOTV~9!JVkZJ4x0z{K_$x0&}N9ZspD2cZx-k{kWQ zhb`OxiFmu279TW$P!R%Z@GTH+58|lv5FBJu#K@1K^jN82B9aKl@UN#U(bLmQL3a;G zp}}CHI}+?4=pXFwj)ez$6TL_kNko$P7fZ$xN$lv@bk-%pi6lOvvMG^_^(MOTG~AO& z?2ip6B0UkL-xZ6-!pONUv3elNe6a-nlDNcTaoqJK`VgCxP19_h4F5;sT%xbc6bUGIK<%uH)EIy{m_vTXNyy&KQQmMmpAfN|{gdgILotZckaLJjK-t$96% zG}AMF-Lj7Kgj9JGHh?P&gd@9~VgwY0IYJds1yqGIf#O4e6dy?F3P%!PBM1e>hkV!% zgx~*l&uAnYNEMl>{`b3&|NGys-!nRDDfqo@@|jBayAWG6P4YMM8D2pB5kAZ3P@m^#_)$KOk|}*yeV zufOsM`u*;u+Mj=peqX<|N2Bi1R9KGYZ?v!)bt_Ug?2iRA6yWMQxexlq$Q&%i*1dXP@nCy9>vmK?Xjlgq# ze*)wBG_S(4s>tD;tn1#T3T?FiBbrI!k(;g}5hwvRdq<`1aXi4jI;U_pqjXuodRho7 znp1Ywu7;c*YCw!OtK^kXk!hXMRgG)Atg8oX%?JUh*RC3}qmq*DYCu?OWsjC2(gX-K zd*8$_GUQ3pskLyhMKdx)d&A?A)^xpyd7VgaV|UR=rR`Om!1eG%X=HAkpJ35H&*R$N&Kuq6!HaK}}&wSLv!ioU+D3ji~7^dx3pa z=~^!-Ye3Sh(jCDVI@h=kO44f7BikKCnGueJ)CLHx0(o%>Hx8%e9J%gTcRVm(!eKqF zysX|l{Q~PwbSFa=POdYo%nBzk(~&UKOYJJ%e3<9jI`cms9-+7Rvs)kMdc69A(o5s$ z)b3QAPjb`Yba*68%_v8e?hN*l39}=~PVt`MCO5$u8gT`7lDeyh7zi-;j%~E$Z80pUu&PvWp`o_6C>;)~%g%gy!pmgWDM??K(Rjh`y zv^$v^?PY;KtU5dz&h276LT=}!C(wyj)u8lJT$;E=~op#@=1_6S{bSmOrUmts4g4V@ll?; z+pixRsvk@0$A{|2hw3Mi`pKkzGMw+>40lz2i)fAXG?;HaY*jL)E&PL$c zLEW{T+pchGuH6xCrQt&$pRsF=+pZ@oQRXz8w&U^rdb_eAZoB@1W#jjJdqY&*^L9N5 zI{x{^#abhP{9CBBTZ_V}I&QP$@J6%gEWT@X&8-AypLPShFw5C%vDlA?dDy-zY8x%r z3w${&4IOmO+x88yfu*(mMu0W>_Gz!<2;pp>vAwM&yDjK-t{9I&eIQt>=oL>!Qk4`9 
zMyQH*%RYB@X)wT0A8eeLD@`B$o`>EFTqev~mTaJn*HomvX=HH3(9vqML3t z5aW=l2NW`iSyEv~NZif?)Igp$^AK(jsiFFkvTh1JWIx7k>CeZSR7&rn7O{nMk-8qK zx9_Q?+=kb8RT`06DQRB0NuX2GsvTs5P9-v99*8m+DMogr zu3^>40-`Kp)g?TMdGIETD5QCi9X|FGDE6-cC}UZ5UYk@cEw4VVPO547A6FU5a#~h> zOg@w9Idxo}(+cXenpI8pxN6g%QJ<`uSH*iV(gE9x43L5C^;u+~q+o5;$k=2xwMvQ@ zrUS~53uTQg(@;hxh1Sz46`~#eyD%uadI)0zN?oAFAftktyIR*k&cu@n4HyO5t`-`w z&rI(fC`-elGkEI5WYRL8;%Q2)lTv}{(M_R627OZ8m{F)jn3{!RfUTF5R5%8 zu;zFgPM;xdD5Ye&DLw*6Vk&kT;o7AWw=u(yfMlklu`7@j8)PqBbKt*iB`Ey-rPpI8 z>Pwd@(wV`m^rJLk`U)O@0RU7gIRqvg_+&29tOmT&}EPXK*U?Dl@>BoP7t} zjSI@HJcm1qU85gY`$jn#)a%5}@WGOD^qWi0KJ;D#JlhCCMAd>#3gqi zLjt2m+^?*q)^$jN6yySCT1yjz@%dVZ(DEp5A0yD$3^D?`2k*KFQ;xyoq4}@rJ3&OyIJn|EByA9!Z7FG!YE~noKdF2SQ z0J*zWwA)pCwY}B1=t)XH1&`bI-M}Bz*^Y2+cdLWZU>yf-d(|Za^7Piy8G9Sg)-vA9 zMZ4M-;L3f2e4q@@4^~GL@V*-Wlm0Rp^xwa7#(wL~A_h7z;2CMr1Cl9tU<7HY4SHWY zV?RN?WxKEEa4PFachC&>V`p2Op=M-yin9cySj<5Az}js4u6P?2E)kGcrsjCt#c{z=5Lx?2 z5ScMDOID5i8EJG!tX3&a4B8BeHLOu8D$;%df?j^4BF@t~hi55L%dzTuH_gC!<*>LN z?TB^-URimS`hEnh{PO^c6&q-Cs?O$Mc;(f{*f>nH3rL-m|5MCjIpoHn@A5Fw=9!6f zPTe_nSV|w5;ec~OF)Mnm;E|pK7>ZPj3Wr7>2VR7lLyMMGzeRl@-7&(>^x-LEN;xnW zoiwRDIoC20G*?fBh$JNd6OhCF3)KQbbP)Vd28l73q^cAw)enqQ$JY#?5 zmEOPq&c|RC9?a#<|@5cLL>*^-5Dobf79u0|jb~ zf}Td<+Fi!=8O4LNN4N@_3^7Rjxv>uY7?^9&JouYrqTJOXA5G{i=&K+@5*p#RG$gpb zWKGx$b3k1E40KfAXbRe9R`#dVT#}mLM`g-fn}pPdgy-{!vLe2Imtjt`mk&*=O zik8|>{`I3@8(EmRSo=W!Q_p;PC%9B(dxY;jV(=UH7=@E-eq+ZiWrIeo9+aETc6%e( zBN`O%1E|Hp?!8MzwPaP>UQqVoDZ6_W)J4`R1WYXs;Ofps(1t@=^V-7Qi@iu1c#oW* zlJOM6D*hYV!g5tw(8UZ~y{(ercwRdwW#p^8zUi0rN(;`bR&Bv4_HS)Kjg<7;t%g?` z*+O)DJ&`;k6e>?LplX_8A4EEZS4!lF$I+Rc@d07l*Q5=&ldtqi6@hlLN6E?GdjsPj zprG+iqc~Mc`L46t7G5lSr9@1@R3r~QcHcD*PO{c%lApV=O2qERB6^CDqd_4?=}kv? 
z&<1{#gGjE_%Y%H@Z?rlHi!_?hx}{Xzbpq_XG>Ts3DqQ~xt^q`3Q8o@UL>Wp4jiBj9 zDe|jvaG5_d(5JO_$^l*^G-F5~KBqe{>{{*oA=$!B} z{S~5Y2JH&nxiB9d!3emOD9}v{RvZ?>8I;V0v$6y$51>rYuCU9H(s1uFv&#?(*SU^c z7^<7b9YS0;70&M0o+ zxy?bPoy7kbYT)3_U@aK^;UU#^EPN=mK@BIvhY2qQ*h#oikYzG`JUjuc(fUuJ{8TuP zwVpbfw1e$pti;x_6y)S1OFag*F)H;9Z(XdR2pk);?G{R#P7Cozi0(SVh7Abe_GY9z z0pxMYYnN-n;l)QH9Y*q!gv*h>wY^y{>CZgdeosk%TEZ)U;%OS0p&8r8vx%pUXKNXG zJezpx%O!1dsa(?P1U8o`wOo#j)ul4J$=9-cVF1dZpRu|e*WnfCWZkl?lWtaINZ$%U zJet0;i3dBWESGdqZ!e2Ssn~Fuow`#pD`jXwXNdJ>Bs0C5xBy>9+t@7Db@egz6#kDQ zCYgtzn$+gi`ETlaK}#c7z@OrjXCSQ4LmVGRI*b3fDn5rX4oG1Y1p|ZBMg|c}$XSY8 z%IKiH6Gjd(fCBD=<0Bj#oT#HmtdL1e!N1Y4*!yX5uf<{y@ZpNP_$& zDW*uw(Mn|GLq-%Jq@WEFaV2d-h%44boD|SN^nw^#hmFLLLUe`VqJ3sZ{1keFA{w~% zLwtj7!uLl;nM!3~trW-=_v1(9KAoFbuJjTAUw-qq$7gefnwZ5kd3o39B4fATUrsFikX2W*}-m{G# zI4r(V^U2eaHZ}!p2%pOodU29FS>zEPq0$8eX@F}Pm)Tm{ zUfNpTj*(4GWT+j-<#iUMjYwe}Y3H7PJfg29xE2@hWI>? z7~9b)2zCmLixP@(tAt>AarDqhir=T^e@NgD2>cO&KPK>50-qu9Cjcc&Vnxc8Qij(3 zze-Q%6dgPnPD(n9yPjBH<~8vJlpG)cl7<60^iE_Thw!HlhA2vji*!l^G2SeEqsAo`8W%{qmG&eKka zXA}fONZyMtBE6IC-}aG*Q=uL7e`NewcqB>N=#3jEDFMmH=DPHqNrJL9O-7rIySSI- zCL+iB9X0ln#RZ;%5B(tz-;1fL)K^a(`%#jJsY1QKN>xAd|I|Q5Lwpf+cxQ6D?NPpLFLk~ zn2kd$_n7!W;BAlO!42^tKxjA}mpk#*_a3=_Viy4Z?pvyGvNO2PbvGLv)`}rL*hsGl zcN^pUJzBw>Azx1_O;{^%ly?gB6-5$Svme1A{Y(CPn(QLJN>ls;fj1L)4B#HC{X*w$ z))#&r*HT0VZ&(|+0Two0z29S{oU|8$cG>YNb$FHb8DIvw z+CVuOBt6UkMzC-(v)>=g!bkD#7g&MkUe&=O_)?85cU-=VgT-_{A=x8^OB!7gd8#)O zlQn`)S={Nx?2$qMYmi>cQQ(T_C3~cLW1#`&o%$pUCdroLV8dwyvM30p^d^RaP=zE8 z`8U8;%pGx1id<;%cLcsn;41{i2-FGuBfw5}nC!(5Q;|+zxQM@{CjUg>p9#E9;A;Ro z6KD6&%|1-KlS8z-PqC6%CD{_CNV1LX2?}nAe<47WEB=+h*9rU^fkVV<4H4^rP9rlR z{UY`F7Krvd+PyGDv>@4Wb)MxR!Osm5ErqTHwMEF8P*yaqiW4-2k literal 0 HcmV?d00001 diff --git a/utils/__pycache__/plots.cpython-38.pyc b/utils/__pycache__/plots.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fd07e6873a65cd992fddac4160ffd11cb51ce6e GIT binary patch literal 21672 zcmbV!3y@q_THby2b9!1cqtU$daNCk)jVzDewxso1k|mGVtUZ<`nf12U+v(}k)6+Bk 
z(z$n}na&-@tYk17#G1fT1Y^>}QUPy~4J-xOkj-WbNPyIa+K`2i8^O9l5%B7gx9D)xErDvyNAb)#LfNq+_*2J(*8R+Nq`TDQT0=r{&sL z&*b~NW!d}=$s5QI%5^BeQ?A4LVO-<2UGx{%Bmez`+IGar#na0z#3A|T53=Y-Lcfr zZ6|-MeC)QBKVIIacFs9!SnaxF=TD%#Oue%nU*gxZbrQ|d!%uiA&y zY4x0XNbN`JarJHLVRaCxC(66kht(l9dB?^$&Zy_rqv{Cq&#K4N)Ez5-P90UpaQ%RK zK^<2oQ1WDX;h#=q#b#sW^0AlE5%?Q*W}ArpMOT{rk-@8*mLz_r5vT?a#> zndMTs6-3EkUYCoitR~Y@`qGOt)6YFyxcuDJ$eVs~=6V!={>5jXedcPEF3p!qi-kEf zkA_TImv1apb-7+{1U|}Z)mCA)xm@<6Y_(A``Gpon7A32Gq3D;Y)hM;Ra%=v?^2+iG zlglee-r~p;bJd_x_I)J#{8F)2E>vas-qnjUA71|^K9MJn_WWwQyxxZih}?^nN{Lgk z_2c5^0)GBsfU<>cWSK35MRZk4c`Amf^~y07SBX0o7Co+#nC5g8ztpTXwO_K)u=l~1 zwAdqm{viNmuiD>b&0&iw_f9gjFQbRItb!E)E||2VEcEBiOb+Mc)6*x*TpoW3OEIr+q;C%)xX<9NfJeAPVw6)Ut}8(zhWbwHOw`yK0D zXU+aT=Z1&Zv+vpaxoq?0d9IYxZ^*CM6U&QLZStH3W9Rv*2;~@ZcIA* zA#{=VFnC|@M^ZmXZ~!1m_B!L8&k3;{&l$GcJMMefbcsmO`*2MBUBb^l0T2Qm0y_j% zLCP7i#;sL1bo5jRIzDazIs)BsYtFrfdqJ6X;@dhAP!mkL`XDZmTWtiIuRiI?$RoQN zxw93mbI3Cw&1M4k8n9&}zN3A#Uy2`oje7j{^H?ViundS&J_v!ioz|4EL zW+(4C`T-U^#p%8l}JOFwlSrSf(jG2y}P;&jn*3nKLDV=iT0<#Bvh zU~eo~YSqe`!#B^66d$Db0UX88=Q~AimR+2_j3QST7d}QMyp5H`Nm8Rw2q1 z3OAODHB+J|(O$EwD1&_fY_FGt`KFR5Y+1XF1=QSo#7{p7kaUuXwCC88yG}`>BqKtC zAErqQ1?@hMM1VE8ZLb1$>@`adhgKsQI6y&nx3y}ubAi{1b<0i#an@~kEDh~7=Odt7 zo)sh()B2}utdniE_XSDTu`RYU^-ayV^-)thh1zeK+5yB3*8Ql}$f2J;l>Gf}iPy=j z0aZRKziZYjtmC`_`~V&}GwuE>)mp82>!}Z(nL0IfY_eYy5uLbZQDq z;8%L79IUjkq5VKdDZ|F9&4%u0aq2pXgbV5{?ll)X%G8=gRZ!)*BFGLfY>Xx21Tyg( zvma#*Yim_&!s?zt6`z$YCuxt^j-9i!fJ|rY41W3nDbr3;6jn02^FrT!HI= z`?wHI(yEVqNbOerDvMN1?NI}25UDuCm7QuBsf604M!>g{YE+Hk3b79JwJ%CtY&4p{ zrKX;_m%6B{#oAO5%uPpbakfE=>H0mGN-22Y8EQOut&0`HyLt;wXY0)2(q=~}f>F5Idr z$XY3iNd-zt2FYJwg+zI|SO=}kry9+M5ykF9e4$g%p{~$>%O0@P_{Qx$g6Fw!rro&R z9@?194W1iW@44%5pq+lL2axeirVBk#;=A`gBv;(gf|Dd*z!b~Rf>%Nl@V|yThvW!X zV3V@%y0!(r`BmJf|W{cVz(8hRqC$Cadu}YxiLctnIUj;^ZsE1{k;`#DaYGnd-s@u#Hcmr za7>*oV^Vx#-x|h)YgWeUppDEo`umz`udpK9`iir!Eu-{8=?8B{eU%KLZ{|HGiW%%p zG?xM*vylx%2!cd82lfD>O)_hc`Y=-KE&w%|V7^*fr1FqYvMI^`q?4C@aE+>jo@2XF 
z*50AH9W-~Sz6ubfbh(86SwXFjFjIC}qEZgHfg&#`F9#x&L~iNk3A5P}&p$WwOyS~1 zeT^OTq3(6my2knmkQd%e_8B?Ng*p_)YQ@=dEsB+>USPD#LBS;LTly5c;5++Og>BtB zj^ByM0VUec(FmYC5~a{nK|1<&-1_GLEGG-W&cjX}z&<@}@B60XIR7^uf805NT^mPk z4!;xj7`T;V9TKkP0DAY@J2&_7M!$c+E-mZp=eO-v?o{sHOZfRtcem=X!0w1pv1WI? zHSCds%e}go)ZZZA03CN09es@K3POV&Bo>n3BMZ2u8dmYFi;B9GvOR|4sz_yi0WH|TFLQo6vNhP93zs^jai*h%#IEE z77xJ}J43uvG1N(-_c7&EY?ZldL$|nZWXuT@s(&2)g~?HBA1+7P8OQwg<~W5p#+-uU z;GH?QTi+dN&@%o1ATCZYpVk-=K(fUGHYRNnIVfz4ey$kgnqYBL>0I}7QCIwP8+Xh! z6%PgFh7aq+xm-(^Rkak9Rkx(s$d#5<@kmY>+_~JVjU&1FBe`3zZR;x!h9Kx-UEyG%&8pH>(gb+SsNQ z{IHLk7jzj{)b^yN(7)Y6|01W+iV{U%1Q8)>8avz4j}TB|g=R!GJ&IqK^@>tOzhIUw ziY-AIJp)Qt*7YSQ>mO!yx71pSVvVJGYeny3CnQo)LMGzm3H>tfJlcn%xamfR%t|da ze7tj+l(U4!x~V|xSu-VpAGkUtEHn{?VcHtDK7S5|EsJtZyOkjSZNbC-3@-giT9jkaCz#P{qO zSSiS%G?#NSW+uT|UNKB?mL)}k?R-LNe+KzJ7ZiHy7D~Ut=^{61wziEbfw~)`n#N7& zftow|NOyGGq^=lj8OjO4=bwPV;7PuLk=Q`*SRXXG`xY>QMe$jfE}M0OzY1ML?-%83 z9%=K2Rput-4fPK&ml^s7Kop1QJm1t>Gl$Ib{`Y0opTJL)W$I{Li*|N%9mv7IMP+u; zLO9wK5>T1ROKfnc0nv2G+l;2;8g-ep4?Hf;mC@giCKxTZiOBSVrDhXx3iV3_TsbV@ zLrhabzkk@o?9bt6>@&_Dd+S1N=m^`4EtG{e#!by6Al_i^*T7}Xs9gOz^7Y>$kf%P# z)cy@1l0;mWedxeHi`*WP3~Wx9DV&VW%;;xPV_JU*5Vn(7*~!NUKF$nhMlVYN>OCqM zlCM9_bXuRmMV}?0pkhoQx0vEq6C5>b%xTa+M6fwXNpYl_h{!?sS%9RI?oU6Eo=SV^ zL^392>pA4NcW6+NqpBQ0kUI*~&^E+o%M^Z>@biyh3Kh26BE?@p?T>j>B~}w)rU{5F zp5#L;?xrA2ht4}tU%{lobisHbyyHIcZVb#k6%g$$l^nN32!b7%j2?n{3>yK8^QsF4 zIuRs05a`MN(H>eq*tH7*y>B%WW;$sIIvLdJgB1XF04f|1^WEF_W$U#=tNmdn1OxB% zhiSQH*U$&l0h7B!?cm*w&jtgXK?Np`kqw3};7}L`-1%;T<>-AVOwn1QMyO0dk>9xn z+fFdt*%ge0DF_&&YY^Es-=s5!=U^mi48r+%`wxXDEuRAy-}0F*7lS2W3svmda_(l; zhsg^vu#(zcTNpW)w0l8pV>#YSIdo$3(2Y;GT(Wbws*va#IrLGkN$tj#nr!kyak*Mw zs^`FPX3IL)oXgFb(SYT)WJI(bn+FwdmJ6z?dp%r+zoG=@1_s>B`6$d8jQ6E>K7Lv7 zNN^g;Hb)uRg~*-~^`YHg=spN`=|-i;p%m)zFS0L3_e)#Y5^WJ%~ z=vxp2O64d%Z?0~0Zth;Ovu{RRdAC1ZZ6v8dl<+O1a`FhkVf;PIDc#hM;@b-H7` zQW?gHn8xeZq8+`$LQyR&`2q7G;6&iC{(SRRl<2W&53}xTQl7}GHs+crK?ify3Z7bP z!U96#1M5FB^#vyTxR3>ZX>P8%tiKaEliA2#j{3GBhyGz?i)kB!L_;oB6q_vu5I&Nn z`C_9{uKAdg);N|amcQuZ3 
zQwN&CwmR@9bn7HgCkb5!y22b^<=?9O(xV;WalANH!HeWKcVzn z$c~b#TB!y!B#AiZ)|wSqoxa2(d&MkTe7USDaQMYn%++Hjd2hL@mh;{U;Qgx-XN42$ zYv{zMZfQAF5T_2{tA7J`>%Rj5Y5mGC0Z>vZd92d?aO#=q;wK6)lqjcW)$Q-L>dq=e z4slsPnY)vOvM1^u=x$7?EoeMUG?*4xv*~+?8}XXka&`%>Asr=QV+i=$w%mLkmwP9l znFApIfbs>pAJU@ky@dsbuml4K%tdsJ#SCYJuc>)!8vVy+N%Zm%WQv$SnU2XA#Mq?( z`#*B#{3zDcs;u*FW93G^ztPn7Vy$3`;Sl*PJk!jfAa&M^wt&vzR`Y+C!{YcXU~JNE zAKhMD=)KdXKV*A_Nn8I88c(|U47{_gl_|ek!8pxR|1C?B`rB~P5y8JFV13OZ{T~4$ zdp5F*W`O^JnKH`J?N8Yl25T5W%^q+wu_0UY1vKTiZ6X>u_6FB}12+{Y$`I3FI0Ijt z1^2|3!K`B=y$NqCRdu3!$A)T4Cp@;0Y%HvL;9|qYVVd)>NnDufPzG-AbmAyUz}N=< z9LB)?;&b5kYY?WuQ8Q8|W`j;@jV7wt%sQtTg3#l*7-5!jk4b@C(}A2SgCUi55$NVxWmuhBBt2IQRw3M^OUSUpOj|2#T79D4}kV-P8@~ z3Wn!7v<6wU7w+F(1E!Bc&@7n7e~2bNNj99AR9`^ga<={x+}}%g&EN~I$~KW{FA5X* z$u|1}ASa#{7RbkY42`3>(}_WJ>B-J4J7s8{bot7Nbo0ciD=tcv~`HP_*wR!T!4Q8n=D12fTYsm9Z1Z%Dvr{A*b&{; zz5w<}L5_JKM@9svIP{laq&4=&2t=xMh+ZaO3t=y?ci?VB?m(Bbt2@H}&JI|N5FP+Z z+aLCaec=v#Q*>O0S$xw>0o4HK+$XIYKNZH_vUXcR8hxd2J0JxE;Q+=?i%AOtSt^7; zHjMXym}u@WZrr7V!Ol?F7wi;bMJG@aV@u6B#>Wceq(23Hj>3sLY^e-KygC>Rqs^{x zQ1$P|J+4`x|Bkz{)gg#*83mg*-~Mg;wR;dJ|LV6LS{TJU?J)0U-m`-31P4?~4N`l9 z#o5t^!l8w+Fsp`O6Wc)u3-&n3U50k+g$dN(DQ;bemLgd07R#R}!t-ar;}Ckm`BLC? 
zu-|1C0atehdqVhhBtNA_%*I7;dsl}y@?)}@_fafe-4*U!h^rVj@5rLFIuedxTtBF2 zVea)Z!rH5&;jpQP^8XVKQ?OQJi#G25FdT-BF;#J>w}$DnMrR$yYA3pRDU;V2>aL*l z5DozUhPSoNcJdTz!Ih*Z~voSQrtYn&_7Ae#d*)UEPCKc`zIg_k_FE z9?tvQ?u`?`^3Njwq0W8~IFK;0tM8quzJZ;rk6|~9x@J7!Lbt>>Q)a;m!C`=H+9u5h zWWFcy^Ctk{+8?58KSVqTakA^k?`g#PIO@Pe{{Su`BEWLxG`i6hWD5E;zz`_qrqg~O zWl|I8<8N}P4G(|iZw`O%+23v7Ij?D#xemL*E9~c^1Y*U{GbQS%ZEPXGiH!9_0F&;z z@TvdB8U)bAX(z@7cMwJ9T^xjIo7`$pc2u ze*<=xK8kM1Qh}qImxkpir^7~A%Mt6bQU6JAYZ_))?~#rk`cP)DPLmQ zud^&Bv0EZ3z-ZX>Qp2H9g&-uMVTn==f(u1P@n-n?kJ$LX5s<&>KPKSZ>n6c|g8xAv zl11MZl;OJi^8l7hLtO?2IS)p=EW~*l<&y3p5#;0cL%2WS(qxy#_aS%KoQR|J&NEie+q3H$Xmi{knk%57`2unJA)-7NE zLuPqgsVKf|oL})3bM-2^3LEcoty+(~6+rz9>`##UpCC0Ek5bK6P_1)AL@6KYE`w?` zVnH}2Uw@g^iE0Kb1w8}^dJGDB)O2B^;b~GF(k~0vT3U1&xnX?UN!+Dzr}v;viB)(%_71_;N}!YAmT0N!$`Iun?fuJuEP|8D9wt{stz)$ zuK*W>HY3K)n}tRR!vcs0AuJCu1cw%D?X*$ s0f?00~hi2MfJ;yGrpbKutl$Tzi* zE2+H$8L%aitVRij?{JXc#TFtq6U+2Z6Z{5ir2TS$z=jsAI|irzHF788!i$XL?39)x zr?jH~1>5`z!F|}15$wTq6!4^JJl%vsI0M-G4>=>C0-zwc!t8JBNtACx8j=92sF(2b zIj-$Uv*4}*Rm9sFuXy03T*d^Kkc%#e0Q7(tFAlld4HBwP*^dE z2+>Y0=el|A6pUs@C29AIw|?U`YYa5K``+^YJgyERD@}L9~*8W#ACd@4=V-)E4nXE%=idK;gkQ8q|jtULh?9%!?vc0 zd7A;DAJzKs6Rb#M!-SU38V0doH(=8tlURknnF_5wLzA*GCXhrRngcN+)V5Y*QeyB$ zN6q&X;1&T!44r9FdbJC&|R`BBhNXp+6MzmAFy%<$X1Fegs3@q&X^%x z%;^k3v%!@l?8Yy{1kJobU@4+YVEj&?ZAy)+M3^xIlr=W*)C9C2=`&0&48dy;X?Km@ ze8lp@`t<sN;v(O-!po}c5JVm$xd1oj6! 
z^o;1gi_wJ%n8Gm^>ZCfQ#@9rGM&1Z!@H81K(xVD-{oVAKI>UED?zlQ@QqYFZsSikB zu6h#MU^3XPJ{Tq;y+8FX*f7Ypv#?U}0)UC4jZ2hR^2_j*Le6U_l4JWyWM6H^>ld$F zm^$4~o9nr$v!~m;f&#qpaK4rhLX$gG_m9E{KG`0IXx^AB({Wwi%*_UcnHR2Y-0KX+ z0V`LEfj1_CCyn^F#j+MQL4mx2A3h(wknfY>!$VtY7DSAK2!NQFy!X);pMMeKegb{z zGXxYvOuGx8y0FeYz5YD}MS^n#+@$N=gq45w6?~q*c%fVWayRkDGu_0OpS@tr@c-^Z zy`1MRwB5z2$Mm0~eLnLH?9-(x&M?6C4<%!B6mO&048@r7f9*UZBxbG;6L7iK>Ev1; zC!qM=9(wl5%eiZFQAe*ieJ3LXWUa7%QXnVQ^29ii+?iDGG3=yesCJx+3c7DK?I^G-KC=lqzl<^?=n0J$$}C#KedIa2!L(%-6`txp zkG2*VwTJJ7GXgSw0K&cpww-~Wei#g$ES#}Wjy?Pr$;9Dq8uxz@Pmqv9nE_OQ1?z@JP^h8Bt2kG6##Fhl1pQ=FDJ3#*L zfkKKs2Q*(40hTkCiIM`WG-(K+aiQ{YL)(Lth=w+<;OBR3aCFA#r5E}zlAWM~w9im* zOf$v`*Zxjcjfh~G2K9vdPmQ6cac~<5jGX~+8`$YYoP;|uAnj#*6vW9<_#xB;d=p}M zfZa}9h->zzAVG^gi|4G8!6*%?v}f7YN37GmODO&wXSVoV+XScUKa1AM897 z3@z+ehX4<#$-9m^ELK0rZm9bxt!3cS2R;s~Ah_wnor57_s23hVFM{%7XFBM+=C@ph1RdSBa&qZ2djoma3G<(!$(&FLd;r%iea5*Zwi@KSEEB5f%C#{Tk4*a#qM3Gkhz zdc(gSc}2K6w9xMpDANb9j|{>7${&N6LP-SGnT+KT)uR#fi2(O%u~Jy9LSXX<5&13| zw-DMX`+V`1-ySgo0w2-3OrJ`z&cSVpi;(yRF`lDibg*^z9p{be|xqA4cA1Fj6s~$@!87fDq}9<(m(+dty-Q=UHW|<_rPEAxG2>NcK6r!p^NoA`yrczgpkxcu>cP&JZ%V2 z7x4fAz#EddJpEVLK(v8Zxo~B6^3?~jafih1-V2YqWFg6<|zI&2@obr{Zj*!2+lm9+PO)u(N}%Kk3_OqYN>8=oCWZSrdr z_Z-Np&f@L;WP~Hlkm!T z@N6MwUM1)iQpugDBXRnD9T?*}S$cIC1Qe#>o-_eKh}d8B^xFso+OcNgAXcVg!+!&f zO0=v`Yyfm+BX80mIW`O8+SQW;WZw??H_( z1>aO$WyNJJxjfO?fK~{7ENgmL7=gbhA##KZ{pv@9K6r}aYKU4OIaAQb;2`j$82b7` zFy5I^yBKW5lcC6!=c43}l;Bi$XSc`&V``j-W#O>(3=br|gfzi6y^)#TCN|pamNqDT zpEhy0WcPGk_&6}E%EoODqhSfly73_?-=MoAxDmRWUFAUJIBSpcGJ*wGX+v>^Qwg35 z*dT_#gswMf?>-YAQ-C9Oi|>LvQ4Xr=Dr)O@@l2z?k^P9X7^F-bFhZL+SZ+S~bQ9k= zFNAE#Mdt6l@dGCE?$fvd)2g6BG>G^bP=aC&@m7@v4__N);U`f?zen)n0PXa3oRTwP zPx^l&<6CBi4>*b67WWPj1_F`wj@NbXAb{jO`~|^De`a%Tz$%0QY|hSU^zvK1+3_IE zw)ej8?DWoJw$oR7;os-}y2H}n!3mZz$bx-h<5m`oL;CHlLz>(Z?UZxHQ^lnUCry}< zuU}`opF?f^2Eji7ka#ADBX2U}&j=nN_#DA?f=$EI&+zv11VZ11eEuWe-XZu-g0~1r zQbpA=wn_4dO_mdR3(aZ+w)HE_CWVDygb9*q#=%EwW(h3%Ed55m%sMjYf6mk)fXJ@j 
zZ^PQ8wK6Z95*$X8fG%R}KjX|CL#E|qpeOCIoo~h^k{ON=I6)49pe8|FX(><2Ns=@O zGJpD$XnAjl9weOPK3cxntlcb6mHeCAmNAWvNoX(O=aXj}gBR@P@Oy;V_Sk_UKH;E- zOW}#fAQn8BJz>tmohOG4J=BZA#h>bWLL90vh?Lwx&S7U}oDwY$Jf%1<#JP))hQu6TOFn25=u%Tly~(J-<$z@vx#IdKy`W+#J(IVi36e~Hkv5Sh%|FrpzSO^x=!v7`wi_&Z8!J9oPL!3 zp8$X&A~LB6bGS$OQ^!x=bIxHf8=eTU#kAvy*Mw~c{qSuj$BgHyhDH!@@gEumYz6%a zJHAE0DL3-ohj~l75yfHSK;W!yu@TO+>1!U7}@(| zk;vdDWTJs4?`s=(#PSVt(Io?PnMwGU*u_1N3z+e^o3}#nxOCH4z%lqjF2IOgTX6eh(w>nm859%2C<_14U$sN zCSFMkVY!O2q+tu+XcbB;#1ajo_Y5LE($E-rzA8gG2`9m9^f4l5I!)OSh>NkFk8x|? z`00;WH)e5oe&=EuL;~o(ihu5DPtek$#wDzKBJ9VO2XQpk6@?nF_wzvgjoDZ51DT)| zV8r-+AiX(i_ihj!wO2@oW8^m>7W-z5k}TV6KjzXz0JbN?VJ(hN%vSr$SDj-d(>p*eQU*<>`5h#6+z!-i_CeDIT{7SenUBNst2AROzz17-~k?l%AJ0D2tu zez%?3`D~+xpl=DJy>*PfF4h`?eGp+CLN~q#W9Wy@)elhxx}I1*uTX5cZR`IDj~e`O zjST!=Sx^#aUU&{-^*~1$83@e{v}6Y?n-n@AiiFwDAndcq7kw%V4RMSqH4b_=g1E#i zgpvWw*07pjK4Qflzws5c+C>_1<13&z)1WvbLUDco6lV|=XDk?1yTyOCCmbZbp@>0i zF6hnR>w`jXra^C}L2tMcq7`C|#?8#Dy`VY!;H3ji6Izq&(i+)@o3uu_dOrS~geSEJ zr**lUy9SdXqLp%oo_%_?V+S{UO>yH5oL{9^*$EU1MWzup8H>fVDF z*B-mo)HRixzH+VIH%H@*FP<~(;ZsEYGC=kB@W)a@De?(Z#W%0{w z_dTY*o!~hAsHD)9CN0@?Gj-D&aBD+Fx-#*@{XAQ>0g{2H@22*Bi!9BR?9Tp`xK zWvn@0X1!k_5Ut@S*@rV%6)GSE2&ct-Q8Yx5i0r_Getn%yngHkw@2}tEqxLgXfan}U z-PtH?PS7CfxY0crhQ+T3vkr|sJiAAKOBV9P1SJX=(K@1+90HlppT`5*2xX(Sw{n&L z(ectZ&R;Jv=%V)_$w=UL2|xcZ>eD4djRSGv5FxOUBz6a)0_1*u6`S8|dT0h_(|0`O zF$ftbjhJy5YwoM@Bo0&G-zIe74Db*ndtZrcco>FgGGZt*3CKd~Hns!~95Ub;8wcUY zOku;{MijfGah|N3n=-k4UKrP3RUV8r_M5I89EbA)0>Z)2S?GfUKaFpnFnIj=3dslv z2V#fto@t&A1NjlLMp{EFg41CLPKPXl3{0>{3I>&d89C%+qBO)jAtE;>k){V;i~*yY zCRgp{Z>__Y*J1F*ziz>Ij6a4;%HQ}Dnv0O8v2s)EG~2QQ4I+56LrfatT@p z|NZV5(R%_9VRT3msC^2GG~J1pMUftIaMI<=cw$e5u=THTM6UifxahADknW)PCnSGH z|94z@8kqND4MOEtdujX;mHyl8&e5O2#gun#;JEMZMOL4@6eS7;)hrbXnyV=hsFBn3 zC8Uqb70kmBZJf=SC0)bKlt?x54@wfNYLu42O#?Uno{bV;6gO8x%8`5Jx#x{>I?l0j=i-1l|Km_|SL+lr`iO9B_HH1@kDT^*8%=_OXI)hL@CSfGM)Nmu?0=_I^V@cM5ZW?qZj4IO+CV# zAL58_5bWXYrwE=Qm?rpDg5MzcU4lO$NK0jcNrGbp;zbtQ^e1`yV+5ZiAYl`v*g`%x 
zX=w6Eb71Z%>Q4T30MDbbn_)-zis74nYV)Oc`xtCE8JB0)({6Ss?q&zFH-`44Q#e7J dcI?cB^k8x@*`I#Qvxgs>*qh#+o=89N{{g%u@)Q67 literal 0 HcmV?d00001 diff --git a/utils/__pycache__/torch_utils.cpython-38.pyc b/utils/__pycache__/torch_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78cc54346dc0fd963a56f7ff8f869505f09ef805 GIT binary patch literal 16799 zcma)jd5|2}d0%%=PtUQl2NsJxfCbouKw=jJ_5eVNgaD8rE>QxP5Dp$AOT*cDy}L6z z7hd-Q*l};H;!+V(2`t%3IgUu!yQ*>$R;d)lRdJ-GqNGygq?|~$Bj@Z&IgX_=g&dY$ z`9l#aiSqk>J+l|cRA#DQzkcs~uix>#@9vx3-B|;_GyRuKZ(T8rAMj!8FO7#6aQS;J z!%#-vP^Pk~X5N&$mAB-c$S34(=WV$s^GUfoc?b7IHC0RJ(8bVR``Tr@YW?~Cc7AuQ-!e?2Hjv*VB~Ep)Hk2Qhe5yK9+ne88+n3*0 z8_kc__UHH44&)D*hWD6ft+^_#GV9i=nSZ?foNYfJY(ICYoYa4;m6I_X${%{osPDB6 z?}^3iS4@qu^RKM@p*M`W^}69Xl_yoV>QTM-Excu)*IhZR`c|zuQ|(gyZ=3lsbyE$f zJ;;r#K{fQYkw2ndRU>LIN}lr4-eYRtyQUgd``@-PuA}OJdJHhf)NAUX`WRp))FJf* zN+#9o>aZF^$&?ybM^G}Yo>E6~Kdz3c3EWS3{gsn=@1&Xn)YEEO9Y^Ui>J4>LJ&lr& zt7p{5QSz+Ht5fO|D0xnOR-IMPBX>&uoI0moMD7#nbLz7CByy+K=hdgx739vSf_g<= zMeginLw!bFd)rXgR;~Q=c)G4`;OPdQUQpI`WBkolZ}#f)jfO5QxJyB~>SrGkjb_Hp zsHfDZ2j1PFR;(B2J&lsI^1NoL(OgE}zFG_xa38u*_5)p>T?#yPp%@ge7Im>&^{V5^ z$hq>$<;xeZMct(Zue4aW?P4!hVFS419vah{z?aGD1^_TJ&ADS3tV|35oyl~Df`)<7vxW(JWaH^(I_Q0-17s9y3bS zJm1HRTgmB(=O(5~d{gI#!(=4dm*0oXmkewnj@H}h6*Dxkpoy7n4cM5CcH{B~k=!$M zHZUq?-9~Dy8lka@X78CZW#v@E-H-8nybHOMj3%C zOc$Pe&-mcw$%RJEo1AYJ{h(N#oG2Bs+>-(^xv(@lxl|3XBg;X#;W?@gTY8D3rcqsN{phx@j6gV$EJRuuINN%bA{-o|>4#a`plVylTO(H5$PJ za6og@mL5a$+PP~pFU?$b|M;tGKmOL2U4e5q;M~@btG!!GW$pQ{Ecn?oai`-cokDZX zU5~m79n>!rBCB4H`p)Zy?_aD38b|30vf3eB>P}JD`T7HFDk~?6tlS=1xt$G<0n!&n zVCBr5nKt`PeF$ZbtQ%h->-H&RI_st^mAIea+9?pTx6F2V66^U)fOQL!^Jb6)lyl#{ zZfq$@-A`NxQ^7i>XFlLM9-o-H0qk^|<$B#yt#8X>oZ(`iCSID`Wgh-BXQn2mPoV6= zg{v;`vdjs0&ZEE7Y&JCLh^v-7v~r8?V!d&vj(UFyhxBUIEBcGpz{g$y63qILD2M1O-_Qi;3!O#8nvy*VKP1rlhVCE`Rs}3o))Z_o|wX6%(kgW zS@}_SV<~7ZkwB5KL^+%^oF^X2<^u4YM@5ez8Fx1KUAriVdd~SEO#!BUQR{||5*VBA zLJjaP$RNW z{LrYGE8ug16(o2lu(1b1qhi;s&{#{ZgYqbQ#P~UDC9z@$PH2Y~Ao8JINmbIJxtP$e 
z2bs03Ja>iWx)s`mRQ(SL8DuJ1z}`b$4EYy>Txf)eHoQQuCQPIL%{FxRss(=htV$~9 zZ6mZV;N2@p_7Wx+9sNgPa;=9?>*fl_;fxsVe*ZA;QA*ll2%XbL9V4ssvMqaUx9;82 zx{u>$oxsIC?h4Iz-~H|T?)H5^rS2!0KixrVp#ebY^!aMh_set3nrBUOH?|!v?g44n zbzJ@cl6jDy&*fHdm_l>a{3Yuaj!sM$!86H`-m@ZWVk$E4?%eq7=_*($aI|?A5?xFZ z9Wam9G0>NVV$&;>;<1!HU5F8KlLT+J#h~rka?u~(6LkxM`NaELqpqjWYh?2!QEH*+ z7lS~{tfHQJvF52lX`x(I;5$(kXSEI-2M3BAa7C}Eqa5B?nDdIk5=NsRXIIXZxT>pM z->8XFzK6pB=AOL{(&*KLaHPxByvq;dx$vojBlIWlWtnpjjnGRr&HbL1nJ?HoGtNhiT^%tK}tq;mjQ)*Lo_ z@aJ<6@+7H!rhXZ4@0t0maJzyo))%*70GS0&qfx@;bEG(F=3+_@lFNt2JcNV$U_Hus z6_Tj1CJU?ybtk>>pFtT1z47C_s5b}=sUQ;=Yvwv<m0&~BoyVc4xb zb;yC`;xWHgc8^_gL-$U}Jyvb)-&s6|_qdZKck;G-@Zb#!VE%$vtwQoGHi_r$Qx+vf zfZeh=*(lwt7K6Elt^rAxAx+iv5xhA{l>3!%3Qo#@|xrgIJ1dTP3_#$+}8OQ(SMs8444)W_DO7{W@%*HC=mlYziwnG6v#gQ z5q4F98m$`5TwtzJ7_tIuEwRp=>>BR&gVag_lp6cg*|oI{Y4y5=dy4lJ;%<7;T7h7o zGPr|Y`kxPzx}vf{w$i1#K$Tw%4gGZp4((^)L~gBn-O%q!u4ml{dck=zD#spI&Bdhu z^`K96i-^|qAVK^J?XQ^rTVaxEr#t$ft@J<$ zEv}ObQ-cPcz;%GTeX2h+G0)v$>K)^O2s_F~d%RKg4H*N*N;>FgZx@Wu&43?fR$^c>!{kh zlB=KQeDrSygMqE~snJz(rr*RHhAP8cA3lW%wV!QN2JksN9gwH=N-pFaz8faiM%Gau z>|NUz#=@+bHy4vWhjR!^e)rxaqb|9~L3l!y!;dMf}>VMAwITI-#5R?w00p+cAGUGN*Je#v(MY z)iqlTN(;wG``y+)(a~y9U&_s@=y8{?-gJc&U>Y3esg~uAePA7R%ZAZsv=S3jbFJjd zO}{9*ByDR%`Q(QLgM4Hq^@aa3-o=U5JDJJYSXZ>Pt0` z3Rbjd-s^~z1<;Eo%8qnqnHLupt%TWrD!c^0<#nPfzSH7MjltVST+>G@H z8|qnuG=?(jc()D-k(DKl*=efo-61!ROkaDbjQdE%4SfAc!h$*{5<)ji}M5?e~5p|S!$zKjP>hW00r0p|@! z(3F3nkgi%jR4URIoK~@`fg&W7h16C_q>iL1Nnzna3G7<$fCjHpMFpJ=sDiy78g=q=P)p8S=h>I##E6eUc7d@F=CAvD{NH$WNFp`&u><7=T4w*k#^+U1bM z_3xu?5@XFUA0{hV4J%puKc7fI4qZvDq*pSngF$ztCv?Ij3?IF! z?|wq{jDXUD^7Vs`W@GzX&@1-0C)@V7o|SBDfBTBr(m@~e3{u+PTImXQ!2*&|J*N%! 
zh!xAVOLm_&TF+vXInEIDF$ns_E{7884y)h+VQ#G-62@*&#ZEu{rwzRSAmohr{ojb+ z9}K9$RTJ+)eLRjZg{R#k^6n91*ib_QMriFbKyy=JnvhZlh;4OgQ1p{nt37LjpxTyp zBtHZh2lEG4>RIVl!>}G=b;I18#gT%W^sMw^CQdM18NpmEeF1YB5h!PE@4C^M4_fy@ z$?j8o-?782+K04TjUr8{{YcYHgMDEScowh)x!yPz<4s6z&=+RIZpf*X!*ylDX~YA-i?&*5^a$G2@T5 z{)^jYGJezZ)MQMbCNEui<*HBn8x49o>*9BlCbUzMwL>d&<4*9%;Ap%k#BHS&iM@0(%^=_4>s1@|Bxoo5k(1 zjLCbCx}U90A9ZWTKX=rvpP8PT8h71~b%yIUV3>4K=$2s=gI?dL`!bTeLtAFeZ`o(3 zkNc5xS!UVFfen!^yJMGM9B180zmczUA1mq**VsjZ6ts)qg8!N;>DVi8YK_1Nt#4sG0&PXZ7So&Fj&wz}70{7T@=G^e= zJ`5+xUf zK0a}BzU2TPCQd*O!R_Sy%=m%LsR%FtT(x0Z+vVEP>?-+ z+f!|I{^f>R!V+hjM0*d1!Piyx8WFSqb>V?Q$hREsF^0p77az5!()6%O1H@9+V+1Ib3n0xE9 zJ0opFfJmFi6MMm)9dj@A^a*ni3K4~iti{@_2o#oi1PW3gOlPAIN)7;K%shoWMUhde zPV9FZictS0ik!#g(}oSI4GK=Atq5nY*mKsR(>YQ}Jq?(6?b2K`x;>)YRwsz2y;>h- zvK?7r^zaLvHot@V*wSw4zmNPQgW^e-K~e6A2c>&p$itZlH%@rmHgaI|!7hIIOO?0r z(%;?k(r@p4DOX{`n)oIjpm~9)fM;LBK?aEdr?x`qk(C6b2KQE4@;36%D%2KWG)C&Q z%Ts_$FGW zoG~&Te{-!hS0d_qk*dNB6mZy+5+1NBU4t8>9xTo;tHyhF~Am+tE2o=}>G_~#- zpr;W2wiz0Q=3r8!^&-m4Nd#4MYGOi+rzeN&X@LA^c21es!9Q!XT-rtGdIBE#Tj>6V zc@l(r3E)vz3{zkQ%4na{|D1^|-#=rHFNtznJN!>5>0moCe;c;PY^ROwr%_jXys5v7viDEO?+>2ew&UD1E-8`KuOopy-l)PJ zFTQX~(gbbun3ydmYYG#k9UgWSP#g!_>V{S&71}&YbkqRWT1)eJ9qn$pw2Z@w(M1{+ z>lLw+OU_ejMrIv3F}h+UM6Gm+HQLfzeJ|7X01;>j{u&5ZF0|(p^Q}I2zS@{AR^85| z$8-4su`5nM#A^Ep*(RoXNhu*WLs8TnTah~lR#-TvpYN?s7nWd8)=MQ?)fCiLsI`!C zxIp^nnQ$S&_637}j|KmV35P7GyJJ(N_P?XDZ4Vn3t+J0qo$a=51j!inH`xdW=n$J_ zE5N7DvsU}PZ%R=Q}cStI3z#|$R1M9{ki zM+3@tAqNvvWjC&YA!FU-AEq)8yR|*28-#Nq0b^PZODaRG<wspji9$OBK_}G z$#ocw`A&Gu9}M5c z`6!{vw#N|LicGi-rU+u;TaPWo!D7#LW!U;;zcnsr13Wfy}PuXpq+b_8;ffV^|6IL1Lw=ZOj-JW5!=;8#8vVbj3yl8U5qZ_csEb-@U75 zWgpfCp+M^#{~@f;=-U3!fr0pm-~d*}SbL1~Tct7j$uNQRaA+eP3%g*LhOwEBBAm|g z?NKBh`9V~wL14uHL)Fvop#CEj7ij;R3Q}u7h&7yTG#qZ>^Bw(e8)x1&1X|-TgaK-s z6+?qTaYyOk+NyI}`qwxVVYVNou=R610aoN)?>+I9I#KvR+t;E@rQ&F0Z z{xT#yR6Zwc9Ocu;<;){@dz+pxmc^h z=>|p0iQ^0MPHZxXlEQRbS-7xjh^JXT<+d^~b6~#yQMc6%D|tun!eiF$%-0`xMOKML 
zmj`^WNMC(1zuj!58?4)0b1KuaJdrIl54LOtqd!8rM8O1EEZ)^`5QrQgrngDa@nQnM z&eE?jNiY$HPF+(|O4C0^0{TkV*CrniSo05&jOSuz{!4^PmTU8V3+94CGmm6GmPv)X zud;zKAgf%{-z2m(=kFx3*-bp{CBnqSbr3q8ZJrfRt!*CqSvp}sEISBY?;y&Kh(378 z)c+fuzK;vPKYL#PDQ+cBpz}*1lg7n4`d>kE4*^~9ZGucuy{f=pri?FI5+`9JW&s|m zd*%QRF>=W`Hz$OU;sorj5DirFz72nwwt?02-Bg825L4ab)=xM%ExU~muQ?~x;n-Vx^q9_f{1YY4*^=KUxsma=%C zM3MLvbUwws;o6GjXw*k^m{-8we+X@fPZE#U{O|qb`VW5No&WeeoS;egh?YDEg2Kp& znvr>l0Rk{aG#e00z{?@0fN3Ytt)@7a&-yS{6k@Yb5(SaD5Se%MFQ9QeJR60v9+KG@ z(HKX{_`R4#_XA)0flJWcdI(MZWgC(Z!?}d#afuy*bdNL-D)A|BMcGc8we>-4p|zOu z5jL}yP>}PO3lmUypS_j61-BxQ1ckz+}1r_dVPy1)Lb1D_`?C z!JQl+_zwDPu%KTcpriiP=)7PbM*x}H4hh8xE^GZy0?5;q?)V*|F=lWmv-cA#>3Rs5 zF2MYW!1U~Z$*FD}<#hd3K=sg>tkVLA&?Aml`s$T_P6DD&AdU(|-xdgj7E3RE!7jL< z$@>S|&h9j{1{pQfx;^h&BAv%&m^0ku?XqC^+CZ>}P7Jgig2a=Cq%nm2aA+YN35L-N zT1fp~dBRG_(>{?eMx_iRma_d1dg3>MxE$CnX^;vECB$6}4xklnbAy7v31|!fs!b)t zWE6YUEqZ*zPHgtv7tj(2My5TXWQU4Iy{phzEcTvCi! zVL&$sEk!#2)RzYT!|(p&$Il~z#HvCcn1!%rT>azym zhkn)4PodQZ{aa$A+rgkwDi+%OZAJv12u|wLxcnxPP6#c?&H(H)mP(xNl-Vd7ld>dy zm;)eA3G(?6azJSPVkZtNi8!cA+dH`h0+NDNhEeOU1JZ#XZX-S_H4}%UPv3}o{l#(< znlFMeL{HaGU_kjS%qxBXlYX-SdmG}&J>=muP>;r>CJ+mYI6%~CigSp2ve_)QggNwF z%|9Z6^~9@=mIXHaEEhfIpR~sFVn`AUFyt>EWZR24(F`6QML~)Dv-9IZl8Xoj`5`ij zL7xz_K=Beobig7Y33+2NqmR>@6+a8bLL~Qy*g5npwMgZxo5)$5In~>1z}Mm)V^Ar;5Ii%1(CWi+089*r&j3*0wsX55n1@?K*9a{AcVf2Z zbff0Ba<1HEL~hF#OCrY0*p>D$TZ1lQaNJqXtH-^=-U#$`TO)0N3HK7@*<- zD@kl@^PqJAPcBY}J`cDOT5tQ2s=RfQm>V=QjsoszFQ&p3pN=TZGSKjFy6F zj9#3(1nHY=fXE@@WxNEk$nbF?K8Z2G-A#P7P{xNd*b=f%sDU0F*`GwSozxl(fWDJL zVTCKjqi?`V;$V$hgWD!F(E;3M0i)!(K`Z9)0Er7K*m>(b@o3gXYy?D0?B!H+Bj z;sy&|2MITbtqU~k7^YlUxbYp`9KKKU<6xUhz#5DW-`oZGn$CCqEuY=oxpQYCknjye zAl#W~==sTFvs_S(lFy+cICV0Zn-uOgxn=h8!x-p+H~_@be^!-h*@~ zML}_C3s*^>j^mxqzKhEL0XU=eu@0JSkj5>lQ5*Wwcse%DeH$$z+rxQ}*`jbkz9_aD z{(Ds8yvlhLC!18g&U%4dMSVP)*H|Vef#>8`akZVE{Wyknv9)$RGOlfWhx+3MX`tr$*L8Ah~j za1MSU5GzmgN2w(Q-fpux5v4blasUr;umHFnyxyyJ>??@lyJ9nr%o0BB&3*3Y+;=nC zBz7Xkbj8Cd&?fYCj-9fDVYLQ!j8_2gajL$~PHr&4Ma-5C3wXe%IV8FIDwEG3iBd3R 
zHA>L>gw}qZ<+DsS#~?XDvumjMWn3a0z+aZkrqemiQk)pk-c5-y!w_ciVtM-wy#6oY zfkgj%GMY>JA@2#XlwJ}mZ)b>n71o5N0x@vuVdh5o0!vdn(Z`s#XY|v2cm~Oh@pHPw z2cBL{g-_zdb)LB|A&Jt^k|?nHQ5JqeekNTl&uXqA{fW(GIhAy(1`DjF}GAN(Km~4sU?iP(CbbzLw7;3Li02G9dP2h1X! zwa_lX=p51%K;=_K`Fu*gHIi_6iGPcu)V@X##yjb6Gx=>MBpUj6m@pzBjso}*b2Q?| zwpp4hMST}lQzUC)(SkA`hG9|qbWLJe&T@PFH<8$O9Hia}RYy1nOa*aJN6s3x7^dE5 z(VGCVBhix>NM|$YuI!*?_GQx9?4fijdm^30Ki88?XR_J!aCRh{%Vx3%kPh?={a. + """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not) + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 0000000..7c8e0bc --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,399 @@ +# YOLOv5 🚀 by Ultralytics, 
GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy +from utils.metrics import bbox_ioa + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self, size=640): + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= (x - mean) / std + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = 
np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # 
Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, 
height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(p * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + 
[0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + 
check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue + T += [A.ColorJitter(*color_jitter, 0)] + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000..7e7e998 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,169 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +AutoAnchor utils +""" + +import random + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from utils import TryExcept +from utils.general import LOGGER, colorstr + +PREFIX = colorstr('AutoAnchor: ') + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da and (da.sign() != ds.sign()): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + + +@TryExcept(f'{PREFIX}ERROR: ') +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = 
(best > 1 / thr).float().mean() # best possible recall + return bpr, aat + + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + if bpr > 0.98: # threshold to recompute + LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') + else: + LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') + na = m.anchors.numel() // 2 # number of anchors + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' + else: + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(s) + + +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + dataset: path to data.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + from scipy.cluster.vq import kmeans + + npr = np.random + thr = 1 / thr + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + # x = wh_iou(wh, 
torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k, verbose=True): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' + for x in k: + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) + return k + + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: + data_dict = yaml.safe_load(f) # model dict + from utils.dataloaders import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') + wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching 
strategies from kmeans to random init') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) + k = print_results(k, verbose=False) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k, verbose) + + return print_results(k).astype(np.float32) diff --git a/utils/autobatch.py b/utils/autobatch.py new file mode 100644 index 0000000..bdeb91c --- /dev/null +++ b/utils/autobatch.py @@ -0,0 +1,72 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-batch utils +""" + +from copy import deepcopy + +import numpy as np +import torch + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + # Check YOLOv5 training batch size + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.8, 
batch_size=16): + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + except Exception as e: + LOGGER.warning(f'{prefix}{e}') + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1 or b > 1024: # b outside of safe range 
+ b = batch_size + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 0000000..b21731c --- /dev/null +++ b/utils/aws/resume.py @@ -0,0 +1,40 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = 
torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: + opt = yaml.safe_load(f) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh new file mode 100644 index 0000000..5fc1332 --- /dev/null +++ b/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "COCO done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 0000000..166d893 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,76 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + +import threading + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [],} + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook: The callback hook name to register the action to + name: The name of the action for later reference + callback: The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook: The name of the hook to check, defaults to all + """ + return self._callbacks[hook] if hook else 
self._callbacks + + def run(self, hook, *args, thread=False, **kwargs): + """ + Loop through the registered actions and fire all callbacks on main thread + + Args: + hook: The name of the hook to check, defaults to all + args: Arguments to receive from YOLOv5 + thread: (boolean) Run callbacks in daemon thread + kwargs: Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + for logger in self._callbacks[hook]: + if thread: + threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() + else: + logger['callback'](*args, **kwargs) diff --git a/utils/dataloaders.py b/utils/dataloaders.py new file mode 100644 index 0000000..d849d51 --- /dev/null +++ b/utils/dataloaders.py @@ -0,0 +1,1181 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse +from zipfile import ZipFile + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + cutout, letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'See 
https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and 
sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def 
__len__(self): + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
+ cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + 
augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = 
self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + if segment: + self.segments[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = 
[self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + gb = 0 # Gigabytes of cached images + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + gb += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
+ with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, 
pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes 
are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), 
self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i 
== 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + im, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # 
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
    """Copy every file found under `path` (recursively) into a flat sibling directory `<path>_flat`.

    Any existing `<path>_flat` directory is deleted first, so the output always
    reflects exactly the current contents of `path`.
    """
    flat_dir = Path(f'{str(path)}_flat')
    if os.path.exists(flat_dir):
        shutil.rmtree(flat_dir)  # start from a clean output folder
    os.makedirs(flat_dir)
    all_files = glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)
    for src in tqdm(all_files):
        shutil.copyfile(src, flat_dir / Path(src).name)  # NOTE: same-named files collide (original behavior)
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.dataloaders import *; autosplit()
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list, tuple)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    images = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
    total = len(images)
    random.seed(0)  # fixed seed so the split assignment is reproducible
    split_ids = random.choices([0, 1, 2], weights=weights, k=total)  # one split id per image

    split_txts = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']
    for name in split_txts:
        stale = path.parent / name
        if stale.exists():
            stale.unlink()  # remove any previous split file

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for split_id, img in tqdm(zip(split_ids, images), total=total):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # label must exist if required
            with open(path.parent / split_txts[split_id], 'a') as f:
                f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n')  # add image to txt file
(missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, 
None, None, nm, nf, ne, nc, msg] + + +class HUBDatasetStats(): + """ Return dataset statistics dictionary with images and instances counts per split per class + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) + Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, 
file not found' + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + 
pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile new file mode 100644 index 0000000..be5c2fb --- /dev/null +++ b/utils/docker/Dockerfile @@ -0,0 +1,65 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference + +# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch +FROM nvcr.io/nvidia/pytorch:22.09-py3 +RUN rm -rf /opt/pytorch # remove 1.2GB dir + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx + +# Install pip packages +COPY requirements.txt . +RUN python -m pip install --upgrade pip wheel +RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ + 'opencv-python<4.6.0.66' \ + --extra-index-url https://download.pytorch.org/whl/cu113 + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Set environment variables +ENV OMP_NUM_THREADS=8 + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t + +# Pull and Run with local directory access +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t + +# Kill all +# sudo docker kill $(sudo docker ps -q) + +# Kill all image-based +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) + +# DockerHub tag update +# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew + +# Clean up +# docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 new file mode 100644 index 0000000..6e8ff77 --- /dev/null +++ b/utils/docker/Dockerfile-arm64 @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM arm64v8/ubuntu:20.04 + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . 
+RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt gsutil notebook \ + tensorflow-aarch64 + # tensorflowjs \ + # onnx onnx-simplifier onnxruntime \ + # coremltools openvino-dev \ + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu new file mode 100644 index 0000000..d6fac64 --- /dev/null +++ b/utils/docker/Dockerfile-cpu @@ -0,0 +1,40 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM ubuntu:20.04 + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . 
+RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ + # openvino-dev \ + --extra-index-url https://download.pytorch.org/whl/cpu + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/downloads.py b/utils/downloads.py new file mode 100644 index 0000000..60417c1 --- /dev/null +++ b/utils/downloads.py @@ -0,0 +1,189 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" + +import logging +import os +import platform +import subprocess +import time +import urllib +from pathlib import Path +from zipfile import ZipFile + +import requests +import torch + + +def is_url(url, check=True): + # Check if string is URL and check if URL exists + try: + url = str(url) + result = urllib.parse.urlparse(url) + assert all([result.scheme, result.netloc]) # check if is url + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online + except (AssertionError, urllib.request.HTTPError): + return False + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def 
url_getsize(url='https://ultralytics.com/images/bus.jpg'): + # Return downloadable file size in bytes + response = requests.head(url, allow_redirects=True) + return int(response.headers.get('content-length', -1)) + + +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + from utils.general import LOGGER + + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info('') + + +def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. + from utils.general import LOGGER + + def github_assets(repository, version='latest'): + # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + if version != 'latest': + version = f'tags/{version}' # i.e. 
tags/v6.2 + response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + file = Path(str(file).strip().replace("'", '')) + if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets + assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default + try: + tag, assets = github_assets(repo, release) + except Exception: + try: + tag, assets = github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except Exception: + tag = release + + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + if name in assets: + url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror + safe_download( + file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + + return str(file) + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') + if file.exists(): + file.unlink() # remove existing file + if cookie.exists(): + cookie.unlink() # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + if cookie.exists(): + cookie.unlink() # remove existing cookie + + # Error check + if r != 0: + if file.exists(): + file.unlink() # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + + +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = 
storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md new file mode 100644 index 0000000..a726acb --- /dev/null +++ b/utils/flask_rest_api/README.md @@ -0,0 +1,73 @@ +# Flask REST API + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +## Requirements + +[Flask](https://palletsprojects.com/p/flask/) is required. Install with: + +```shell +$ pip install Flask +``` + +## Run + +After Flask installation run: + +```shell +$ python3 restapi.py --port 5000 +``` + +Then use [curl](https://curl.se/) to perform a request: + +```shell +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' +``` + +The model inference results are returned as a JSON response: + +```json +[ + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } +] +``` + +An example python script to 
perform inference using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py new file mode 100644 index 0000000..773ad89 --- /dev/null +++ b/utils/flask_rest_api/example_request.py @@ -0,0 +1,19 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Perform test request +""" + +import pprint + +import requests + +DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" +IMAGE = "zidane.jpg" + +# Read image +with open(IMAGE, "rb") as f: + image_data = f.read() + +response = requests.post(DETECTION_URL, files={"image": image_data}).json() + +pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py new file mode 100644 index 0000000..8482435 --- /dev/null +++ b/utils/flask_rest_api/restapi.py @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run a Flask REST API exposing one or more YOLOv5s models +""" + +import argparse +import io + +import torch +from flask import Flask, request +from PIL import Image + +app = Flask(__name__) +models = {} + +DETECTION_URL = "/v1/object-detection/" + + +@app.route(DETECTION_URL, methods=["POST"]) +def predict(model): + if request.method != "POST": + return + + if request.files.get("image"): + # Method 1 + # with request.files["image"] as f: + # im = Image.open(io.BytesIO(f.read())) + + # Method 2 + im_file = request.files["image"] + im_bytes = im_file.read() + im = Image.open(io.BytesIO(im_bytes)) + + if model in models: + results = models[model](im, size=640) # reduce size=320 for faster inference + return results.pandas().xyxy[0].to_json(orient="records") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") + parser.add_argument("--port", default=5000, type=int, help="port number") + parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, 
i.e. --model yolov5n yolov5s') + opt = parser.parse_args() + + for m in opt.model: + models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + + app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py new file mode 100644 index 0000000..e2faca9 --- /dev/null +++ b/utils/general.py @@ -0,0 +1,1083 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" + +import contextlib +import glob +import inspect +import logging +import math +import os +import platform +import random +import re +import shutil +import signal +import sys +import time +import urllib +from copy import deepcopy +from datetime import datetime +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from typing import Optional +from zipfile import ZipFile + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils import TryExcept, emojis +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +RANK = int(os.getenv('RANK', -1)) + +# Settings +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns 
= 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'COLAB_GPU' in os.environ + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): + return True + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if not test: + return os.access(dir, os.W_OK) # possible issues on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + + +def set_logging(name=None, verbose=VERBOSE): + # Sets level and returns logger + if is_kaggle() or is_colab(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = 
logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + log = logging.getLogger(name) + log.setLevel(level) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + handler.setLevel(level) + log.addHandler(handler) + + +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +if platform.system() == 'Windows': + for fn in LOGGER.info, LOGGER.warning: + setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +class Timeout(contextlib.ContextDecorator): + # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in 
args.items())) + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def get_default_args(func): + # Get func() default arguments + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. 
'2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@TryExcept() +@WorkingDirectory(ROOT) +def check_git_status(repo='ultralytics/yolov5', branch='master'): + # YOLOv5 status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind + if n > 0: + pull = 'git pull' if 
remote == 'origin' else f'git pull {remote} {branch}' + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(s) + + +def check_python(minimum='3.7.0'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, emojis(s) # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@TryExcept() +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f"{prefix} {file} not found, check failed." 
+ with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' + n = 0 + for r in requirements: + try: + pkg.require(r) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + try: + assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' + assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if Path(file).is_file() or not file: # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert 
Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + elif file.startswith('clearml://'): # ClearML Dataset ID + assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT, progress=False): + # Download font to CONFIG_DIR if necessary + font = Path(font) + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): + url = f'https://ultralytics.com/assets/{font.name}' + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) + + +def check_dataset(data, autodownload=True): + # Download, check and/or unzip dataset if not found locally + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip + download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data) # dictionary + + # Checks + for k in 'train', 'val', 'names': + assert k in data, f"data.yaml '{k}:' field missing ❌" + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + data['nc'] = len(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + if not s or not autodownload: + raise Exception('Dataset not found ❌') + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=DATASETS_DIR) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = 
def yaml_save(file='data.yaml', data=None):
    """Safely save a dict *data* to the YAML file *file*.

    Path values are stringified so the output stays portable across machines;
    insertion order is preserved (sort_keys=False).

    Note: the previous signature used a mutable default (`data={}`), which is
    a shared-object pitfall in Python; `None` is used as the sentinel instead,
    keeping the call-site behavior identical.
    """
    data = {} if data is None else data
    with open(file, 'w') as f:
        # Convert pathlib.Path values to plain strings before dumping
        yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
    """Multithreaded file download and unzip helper, used in data.yaml for autodownload.

    *url* may be a single URL/path or an iterable of them; with threads > 1
    downloads run in a thread pool. Archives (.zip/.tar/.gz) are optionally
    extracted into *dir*, and the archive removed afterwards when *delete*.
    """

    def download_one(url, dir):
        # Fetch a single URL into dir (a pre-existing local file is used as-is)
        success = True
        if Path(url).is_file():
            f = Path(url)  # already a local file
        else:
            f = dir / Path(url).name
            LOGGER.info(f'Downloading {url} to {f}...')
            for attempt in range(retry + 1):
                if curl:
                    s = 'sS' if threads > 1 else ''  # silent when multithreaded
                    code = os.system(
                        f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -')  # curl with retry + continue
                    success = code == 0
                else:
                    torch.hub.download_url_to_file(url, f, progress=threads == 1)  # torch download
                    success = f.is_file()
                if success:
                    break
                if attempt < retry:
                    LOGGER.warning(f'⚠️ Download failure, retrying {attempt + 1}/{retry} {url}...')
                else:
                    LOGGER.warning(f'❌ Failed to download {url}...')

        if unzip and success and f.suffix in ('.zip', '.tar', '.gz'):
            LOGGER.info(f'Unzipping {f}...')
            if f.suffix == '.zip':
                ZipFile(f).extractall(path=dir)  # unzip
            elif f.suffix == '.tar':
                os.system(f'tar xf {f} --directory {f.parent}')  # untar
            elif f.suffix == '.gz':
                os.system(f'tar xfz {f} --directory {f.parent}')  # untar gzipped
            if delete:
                f.unlink()  # remove the archive after extraction

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # ensure destination exists
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multithreaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
def colorstr(*inputs):
    """Colour text with ANSI escape codes (https://en.wikipedia.org/wiki/ANSI_escape_code).

    Usage: colorstr('blue', 'hello world') — any number of colour/style names
    followed by the string; a single argument defaults to blue + bold.
    """
    *styles, text = inputs if len(inputs) > 1 else ('blue', 'bold', inputs[0])
    palette = {
        'black': '\033[30m',  # basic colors
        'red': '\033[31m',
        'green': '\033[32m',
        'yellow': '\033[33m',
        'blue': '\033[34m',
        'magenta': '\033[35m',
        'cyan': '\033[36m',
        'white': '\033[37m',
        'bright_black': '\033[90m',  # bright colors
        'bright_red': '\033[91m',
        'bright_green': '\033[92m',
        'bright_yellow': '\033[93m',
        'bright_blue': '\033[94m',
        'bright_magenta': '\033[95m',
        'bright_cyan': '\033[96m',
        'bright_white': '\033[97m',
        'end': '\033[0m',  # misc
        'bold': '\033[1m',
        'underline': '\033[4m'}
    prefix = ''.join(palette[name] for name in styles)
    return prefix + f'{text}' + palette['end']
def xyxy2xywh(x):
    """Convert nx4 boxes from [x1, y1, x2, y2] (corners) to [x, y, w, h] (centre + size).

    Accepts a torch.Tensor or numpy array; returns the same container type,
    leaving the input untouched.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    # width/height first, centre after — each column is computed from x, so order is free
    out[:, 2] = x[:, 2] - x[:, 0]  # width
    out[:, 3] = x[:, 3] - x[:, 1]  # height
    out[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x centre
    out[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y centre
    return out
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    """Convert nx4 normalized [x, y, w, h] boxes to pixel [x1, y1, x2, y2] corners.

    w/h rescale the normalized coordinates; padw/padh shift the result
    (e.g. for letterboxed images). Works on torch.Tensor or numpy array.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = w * (x[:, 0] - half_w) + padw  # top-left x
    out[:, 1] = h * (x[:, 1] - half_h) + padh  # top-left y
    out[:, 2] = w * (x[:, 0] + half_w) + padw  # bottom-right x
    out[:, 3] = h * (x[:, 1] + half_h) + padh  # bottom-right y
    return out
def resample_segments(segments, n=1000):
    """Up-sample each (m,2) polygon in *segments* to n evenly spaced points.

    The polygon is closed (first point appended) before linear interpolation.
    Mutates *segments* in place and also returns it, matching the original API.
    """
    for idx, seg in enumerate(segments):
        closed = np.concatenate((seg, seg[0:1, :]), axis=0)  # append first point to close the loop
        t_old = np.arange(len(closed))
        t_new = np.linspace(0, len(closed) - 1, n)
        cols = [np.interp(t_new, t_old, closed[:, axis]) for axis in range(2)]  # x then y
        segments[idx] = np.stack(cols, axis=1)  # (n, 2) segment xy
    return segments
def clip_segments(boxes, shape):
    """Clip segment points (x, y) in place to image bounds *shape* = (height, width)."""
    h, w = shape[0], shape[1]
    if isinstance(boxes, torch.Tensor):  # tensors: clamp each column in place (faster individually)
        boxes[:, 0].clamp_(0, w)  # x
        boxes[:, 1].clamp_(0, h)  # y
    else:  # np.array: grouped clip is faster
        boxes[:, 0] = boxes[:, 0].clip(0, w)  # x
        boxes[:, 1] = boxes[:, 1].clip(0, h)  # y
labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + mi = 5 + nc # mask start index + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box/Mask + box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) + mask = x[:, mi:] # zero columns if no masks + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = x[:, 5:mi].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + else: + x = x[x[:, 4].argsort(descending=True)] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, 
def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    """Strip training state from checkpoint *f* to finalize it, optionally saving to *s*.

    Replaces the model with its EMA if present, drops optimizer/W&B/EMA state,
    converts weights to FP16 and freezes them, then reports the final size.
    """
    ckpt = torch.load(f, map_location=torch.device('cpu'))
    if ckpt.get('ema'):
        ckpt['model'] = ckpt['ema']  # the EMA weights are the ones worth keeping
    for key in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates':  # training-only keys
        ckpt[key] = None
    ckpt['epoch'] = -1
    ckpt['model'].half()  # FP16 halves the file size
    for param in ckpt['model'].parameters():
        param.requires_grad = False  # inference-only from here on
    target = s or f
    torch.save(ckpt, target)
    mb = os.path.getsize(target) / 1E6  # filesize in MB
    LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
(('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') + + if bucket: + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for a in d: + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255 # 0 - 255 
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """Increment a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ...

    When *path* exists and exist_ok is False, the first free numbered variant
    (2..9998) is returned; for files the counter is inserted before the
    extension. Optionally creates the resulting directory.
    """
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        # Split off the suffix for files so the counter lands before the extension
        if path.is_file():
            stem, suffix = path.with_suffix(''), path.suffix
        else:
            stem, suffix = path, ''
        for idx in range(2, 9999):
            candidate = f'{stem}{sep}{idx}{suffix}'
            if not os.path.exists(candidate):
                break
        path = Path(candidate)

    if mkdir:
        path.mkdir(parents=True, exist_ok=True)

    return path
index 0000000..0155618 --- /dev/null +++ b/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..42d7ffc --- /dev/null +++ b/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==21.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..5056b7c --- /dev/null +++ b/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py new file mode 100644 index 0000000..bc8dd76 --- /dev/null +++ b/utils/loggers/__init__.py @@ -0,0 +1,404 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + +import os +import warnings +from pathlib import Path + +import 
pkg_resources as pkg +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import LOGGER, colorstr, cv2 +from utils.loggers.clearml.clearml_utils import ClearmlLogger +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_labels, plot_results +from utils.torch_utils import de_parallel + +LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML +RANK = int(os.getenv('RANK', -1)) + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False + if not wandb_login_success: + wandb = None +except (ImportError, AssertionError): + wandb = None + +try: + import clearml + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + +try: + if RANK not in [0, -1]: + comet_ml = None + else: + import comet_ml + + assert hasattr(comet_ml, '__version__') # verify package import not local dir + from utils.loggers.comet import CometLogger + +except (ModuleNotFoundError, ImportError, AssertionError): + comet_ml = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.weights = weights + self.opt = opt + self.hyp = hyp + self.plots = not opt.noplots # plot results + self.logger = logger # for printing results to console + self.include = include + self.keys = [ + 'train/box_loss', + 'train/obj_loss', + 'train/cls_loss', # train loss + 'metrics/precision', + 'metrics/recall', + 'metrics/mAP_0.5', + 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', + 'val/obj_loss', + 
'val/cls_loss', # val loss + 'x/lr0', + 'x/lr1', + 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + self.csv = True # always log to csv + + # Messages + # if not wandb: + # prefix = colorstr('Weights & Biases: ') + # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" + # self.logger.info(s) + if not clearml: + prefix = colorstr('ClearML: ') + s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" + self.logger.info(s) + if not comet_ml: + prefix = colorstr('Comet: ') + s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" + self.logger.info(s) + # TensorBoard + s = self.save_dir + if 'tb' in self.include and not self.opt.evolve: + prefix = colorstr('TensorBoard: ') + self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(s)) + + # W&B + if wandb and 'wandb' in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') + run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None + self.opt.hyp = self.hyp # add hyperparameters + self.wandb = WandbLogger(self.opt, run_id) + # temp warn. because nested artifacts not supported after 0.12.10 + # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
+ # self.logger.warning(s) + else: + self.wandb = None + + # ClearML + if clearml and 'clearml' in self.include: + self.clearml = ClearmlLogger(self.opt, self.hyp) + else: + self.clearml = None + + # Comet + if comet_ml and 'comet' in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): + run_id = self.opt.resume.split("/")[-1] + self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) + + else: + self.comet_logger = CometLogger(self.opt, self.hyp) + + else: + self.comet_logger = None + + @property + def remote_dataset(self): + # Get data_dict if custom dataset artifact link is provided + data_dict = None + if self.clearml: + data_dict = self.clearml.data_dict + if self.wandb: + data_dict = self.wandb.data_dict + if self.comet_logger: + data_dict = self.comet_logger.data_dict + + return data_dict + + def on_train_start(self): + if self.comet_logger: + self.comet_logger.on_train_start() + + def on_pretrain_routine_start(self): + if self.comet_logger: + self.comet_logger.on_pretrain_routine_start() + + def on_pretrain_routine_end(self, labels, names): + # Callback runs on pre-train routine end + if self.plots: + plot_labels(labels, names, self.save_dir) + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + # if self.clearml: + # pass # ClearML saves these images automatically using hooks + if self.comet_logger: + self.comet_logger.on_pretrain_routine_end(paths) + + def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + log_dict = dict(zip(self.keys[0:3], vals)) + # Callback runs on train batch end + # ni: number integrated batches (since train start) + if self.plots: + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + plot_images(imgs, targets, paths, f) + if ni == 0 and self.tb and not self.opt.sync_bn: + log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, 
self.opt.imgsz)) + if ni == 10 and (self.wandb or self.clearml): + files = sorted(self.save_dir.glob('train*.jpg')) + if self.wandb: + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Mosaics') + + if self.comet_logger: + self.comet_logger.on_train_batch_end(log_dict, step=ni) + + def on_train_epoch_end(self, epoch): + # Callback runs on train epoch end + if self.wandb: + self.wandb.current_epoch = epoch + 1 + + if self.comet_logger: + self.comet_logger.on_train_epoch_end(epoch) + + def on_val_start(self): + if self.comet_logger: + self.comet_logger.on_val_start() + + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end + if self.wandb: + self.wandb.val_one_image(pred, predn, path, names, im) + if self.clearml: + self.clearml.log_image_with_boxes(path, pred, names, im) + + def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + if self.comet_logger: + self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + # Callback runs on val end + if self.wandb or self.clearml: + files = sorted(self.save_dir.glob('val*.jpg')) + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') + + if self.comet_logger: + self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch + x = dict(zip(self.keys, vals)) + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + 
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) + elif self.clearml: # log to ClearML if TensorBoard not used + for k, v in x.items(): + title, series = k.split('/') + self.clearml.task.get_logger().report_scalar(title, series, v, epoch) + + if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary + self.wandb.log(x) + self.wandb.end_epoch(best_result=best_fitness == fi) + + if self.clearml: + self.clearml.current_epoch_logged_images = set() # reset epoch image limit + self.clearml.current_epoch += 1 + + if self.comet_logger: + self.comet_logger.on_fit_epoch_end(x, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + # Callback runs on model save event + if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: + if self.wandb: + self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + if self.clearml: + self.clearml.task.update_output_model(model_path=str(last), + model_name='Latest Model', + auto_delete_file=False) + + if self.comet_logger: + self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) + + def on_train_end(self, last, best, epoch, results): + # Callback runs on training end, i.e. 
saving best model + if self.plots: + plot_results(file=self.save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") + + if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log(dict(zip(self.keys[3:10], results))) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), + type='model', + name=f'run_{self.wandb.wandb_run.id}_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + + if self.clearml and not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model', + auto_delete_file=False) + + if self.comet_logger: + final_results = dict(zip(self.keys[3:10], results)) + self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) + + def on_params_update(self, params: dict): + # Update hyperparams or configs of the experiment + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) + if self.comet_logger: + self.comet_logger.on_params_update(params) + + +class GenericLogger: + """ + YOLOv5 General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
+ Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = Path(opt.save_dir) + self.include = include + self.console_logger = console_logger + self.csv = self.save_dir / 'results.csv' # CSV logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project=web_project_name(str(opt.project)), + name=None if opt.name == "exp" else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics, epoch): + # Log metrics dictionary to all loggers + if self.csv: + keys, vals = list(metrics.keys()), list(metrics.values()) + n = len(metrics) + 1 # number of cols + s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header + with open(self.csv, 'a') as f: + f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in metrics.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, 
model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + def update_params(self, params): + # Update the paramters logged + if self.wandb: + wandb.run.config.update(params, allow_val_change=True) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') + + +def web_project_name(project): + # Convert local project name to web project name + if not project.startswith('runs/train'): + return project + suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' + return f'YOLOv5{suffix}' diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md new file mode 100644 index 0000000..64eef6b --- /dev/null +++ b/utils/loggers/clearml/README.md @@ -0,0 +1,222 @@ +# ClearML Integration + +Clear|MLClear|ML + +## About ClearML + +[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. 
+ +🔨 Track every YOLOv5 training run in the experiment manager + +🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool + +🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent + +🔬 Get the very best mAP using ClearML Hyperparameter Optimization + +🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving + +
+And so much more. It's up to you how many of these tools you want to use; you can stick to the experiment manager, or chain them all together into an impressive pipeline! +
+
+ +![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) + + +
+
+ +## 🦾 Setting Things Up + +To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: + +Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! + +1. Install the `clearml` python package: + + ```bash + pip install clearml + ``` + +1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: + + ```bash + clearml-init + ``` + +That's it! You're done 😎 + +
+ +## 🚀 Training YOLOv5 With ClearML + +To enable ClearML experiment tracking, simply install the ClearML pip package. + +```bash +pip install clearml +``` + +This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache +``` + +This will capture: +- Source code + uncommitted changes +- Installed packages +- (Hyper)parameters +- Model files (use `--save-period n` to save a checkpoint every n epochs) +- Console output +- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...) +- General info such as machine details, runtime, creation date etc. +- All produced plots such as label correlogram and confusion matrix +- Images with bounding boxes per epoch +- Mosaic per epoch +- Validation images per epoch +- ... + +That's a lot right? 🤯 +Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them! + +There even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! + +
+ +## 🔗 Dataset Version Management + +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! + +![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) + +### Prepare Your Dataset + +The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default datasets are downloaded to the `../datasets` folder in relation to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ LICENSE + |_ README.txt +``` +But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. + +Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. + +Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`. + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ coco128.yaml # <---- HERE! + |_ LICENSE + |_ README.txt +``` + +### Upload Your Dataset + +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: +```bash +cd coco128 +clearml-data sync --project YOLOv5 --name coco128 --folder . +``` + +The command `clearml-data sync` is actually a shorthand command.
You could also run these commands one after the other: +```bash +# Optionally add --parent if you want to base +# this version on another dataset version, so no duplicate files are uploaded! +clearml-data create --name coco128 --project YOLOv5 +clearml-data add --files . +clearml-data close +``` + +### Run Training Using A ClearML Dataset + +Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache +``` + +
+ +## 👀 Hyperparameter Optimization + +Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! + +Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically, this is basically what HPO does! + +To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager, we will essentially clone it and change its hyperparameters. + +You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. + +```bash +# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch +pip install optuna +python utils/loggers/clearml/hpo.py +``` + +![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) + +## 🤯 Remote Execution (advanced) + +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +This is where the ClearML Agent comes into play. Check out what the agent can do here: + +- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) + +In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). 
So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. + +You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: +```bash +clearml-agent daemon --queue [--docker] +``` + +### Cloning, Editing And Enqueuing + +With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! + +🪄 Clone the experiment by right clicking it + +🎯 Edit the hyperparameters to what you wish them to be + +⏳ Enqueue the task to any of the queues by right clicking it + +![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) + +### Executing A Task Remotely + +Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! + +To run the YOLOv5 training script remotely, all you have to do is add this line to the train.py script after the clearml logger has been instantiated: +```python +# ... +# Loggers +data_dict = None +if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.clearml: + loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE + # Data_dict is either None if the user did not choose a ClearML dataset or is filled in by ClearML + data_dict = loggers.clearml.data_dict +# ... +``` +When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! + +### Autoscaling workers + +ClearML comes with autoscalers too!
This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! + +Check out the autoscalers getting started video below. + +[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py new file mode 100644 index 0000000..eb1c12c --- /dev/null +++ b/utils/loggers/clearml/clearml_utils.py @@ -0,0 +1,157 @@ +"""Main Logger class for ClearML experiment tracking.""" +import glob +import re +from pathlib import Path + +import numpy as np +import yaml + +from utils.plots import Annotator, colors + +try: + import clearml + from clearml import Dataset, Task + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + + +def construct_dataset(clearml_info_string): + """Load in a clearml dataset and fill the internal data_dict with its contents. 
+ """ + dataset_id = clearml_info_string.replace('clearml://', '') + dataset = Dataset.get(dataset_id=dataset_id) + dataset_root_path = Path(dataset.get_local_copy()) + + # We'll search for the yaml file definition in the dataset + yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + if len(yaml_filenames) > 1: + raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' + 'the dataset definition this way.') + elif len(yaml_filenames) == 0: + raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' + 'inside the dataset root path.') + with open(yaml_filenames[0]) as f: + dataset_definition = yaml.safe_load(f) + + assert set(dataset_definition.keys()).issuperset( + {'train', 'test', 'val', 'nc', 'names'} + ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + + data_dict = dict() + data_dict['train'] = str( + (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None + data_dict['test'] = str( + (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None + data_dict['val'] = str( + (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None + data_dict['nc'] = dataset_definition['nc'] + data_dict['names'] = dataset_definition['names'] + + return data_dict + + +class ClearmlLogger: + """Log training runs, datasets, models, and predictions to ClearML. + + This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, + this information includes hyperparameters, system configuration and metrics, model metrics, code information and + basic data metrics and analyses. 
+ + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + """ + + def __init__(self, opt, hyp): + """ + - Initialize ClearML Task, this object will capture the experiment + - Upload dataset version to ClearML Data if opt.upload_dataset is True + + arguments: + opt (namespace) -- Commandline arguments for this run + hyp (dict) -- Hyperparameters for this run + + """ + self.current_epoch = 0 + # Keep tracked of amount of logged images to enforce a limit + self.current_epoch_logged_images = set() + # Maximum number of images to log to clearML per epoch + self.max_imgs_to_log_per_epoch = 16 + # Get the interval of epochs when bounding box images should be logged + self.bbox_interval = opt.bbox_interval + self.clearml = clearml + self.task = None + self.data_dict = None + if self.clearml: + self.task = Task.init( + project_name='YOLOv5', + task_name='training', + tags=['YOLOv5'], + output_uri=True, + auto_connect_frameworks={'pytorch': False} + # We disconnect pytorch auto-detection, because we added manual model save points in the code + ) + # ClearML's hooks will already grab all general parameters + # Only the hyperparameters coming from the yaml config file + # will have to be added manually! + self.task.connect(hyp, name='Hyperparameters') + + # Get ClearML Dataset Version if requested + if opt.data.startswith('clearml://'): + # data_dict should have the following keys: + # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) + self.data_dict = construct_dataset(opt.data) + # Set data to data_dict because wandb will crash without this information and opt is the best way + # to give it to them + opt.data = self.data_dict + + def log_debug_samples(self, files, title='Debug Samples'): + """ + Log files (images) as debug samples in the ClearML task. 
+ + arguments: + files (List(PosixPath)) a list of file paths in PosixPath format + title (str) A title that groups together images with the same values + """ + for f in files: + if f.exists(): + it = re.search(r'_batch(\d+)', f.name) + iteration = int(it.groups()[0]) if it else 0 + self.task.get_logger().report_image(title=title, + series=f.name.replace(it.group(), ''), + local_path=str(f), + iteration=iteration) + + def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): + """ + Draw the bounding boxes on a single image and report the result as a ClearML debug sample. + + arguments: + image_path (PosixPath) the path the original image file + boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + class_names (dict): dict containing mapping of class int to class name + image (Tensor): A torch tensor containing the actual image data + """ + if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: + # Log every bbox_interval times and deduplicate for any intermittend extra eval runs + if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: + im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) + annotator = Annotator(im=im, pil=True) + for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): + color = colors(i) + + class_name = class_names[int(class_nr)] + confidence_percentage = round(float(conf) * 100, 2) + label = f"{class_name}: {confidence_percentage}%" + + if conf > conf_threshold: + annotator.rectangle(box.cpu().numpy(), outline=color) + annotator.box_label(box.cpu().numpy(), label=label, color=color) + + annotated_image = annotator.result() + self.task.get_logger().report_image(title='Bounding Boxes', + series=image_path.name, + iteration=self.current_epoch, + image=annotated_image) + 
self.current_epoch_logged_images.add(image_path) diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py new file mode 100644 index 0000000..ee518b0 --- /dev/null +++ b/utils/loggers/clearml/hpo.py @@ -0,0 +1,84 @@ +from clearml import Task +# Connecting ClearML with the current process, +# from here on everything is logged automatically +from clearml.automation import HyperParameterOptimizer, UniformParameterRange +from clearml.automation.optuna import OptimizerOptuna + +task = Task.init(project_name='Hyper-Parameter Optimization', + task_name='YOLOv5', + task_type=Task.TaskTypes.optimizer, + reuse_last_task_id=False) + +# Example use case: +optimizer = HyperParameterOptimizer( + # This is the experiment we want to optimize + base_task_id='', + # here we define the hyper-parameters to optimize + # Notice: The parameter name should exactly match what you see in the UI: / + # For Example, here we see in the base experiment a section Named: "General" + # under it a parameter named "batch_size", this becomes "General/batch_size" + # If you have `argparse` for example, then arguments will appear under the "Args" section, + # and you should instead pass "Args/batch_size" + hyper_parameters=[ + UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), + UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), + UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), + UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), + UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), + UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), + UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), + UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), + 
UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), + UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), + UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), + UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), + UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), + UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), + UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), + UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), + UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), + UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], + # this is the objective metric we want to maximize/minimize + objective_metric_title='metrics', + objective_metric_series='mAP_0.5', + # now we decide if we want to maximize it or minimize it (accuracy we maximize) + objective_metric_sign='max', + # let us limit the number of concurrent experiments, + # this in turn will make sure we do dont bombard the scheduler with experiments. 
+ # if we have an auto-scaler connected, this, by proxy, will limit the number of machine + max_number_of_concurrent_tasks=1, + # this is the optimizer class (actually doing the optimization) + # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) + optimizer_class=OptimizerOptuna, + # If specified only the top K performing Tasks will be kept, the others will be automatically archived + save_top_k_tasks_only=5, # 5, + compute_time_limit=None, + total_max_jobs=20, + min_iteration_per_job=None, + max_iteration_per_job=None, +) + +# report every 10 seconds, this is way too often, but we are testing here +optimizer.set_report_period(10 / 60) +# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent +# an_optimizer.start_locally(job_complete_callback=job_complete_callback) +# set the time limit for the optimization process (2 hours) +optimizer.set_time_limit(in_minutes=120.0) +# Start the optimization process in the local environment +optimizer.start_locally() +# wait until process is done (notice we are controlling the optimization process in the background) +optimizer.wait() +# make sure background optimization stopped +optimizer.stop() + +print('We are done, good bye') diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md new file mode 100644 index 0000000..3a51cb9 --- /dev/null +++ b/utils/loggers/comet/README.md @@ -0,0 +1,256 @@ + + +# YOLOv5 with Comet + +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) + +# About Comet + +Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. + +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
+Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! + +# Getting Started + +## Install Comet + +```shell +pip install comet_ml +``` + +## Configure Comet Credentials + +There are two ways to configure Comet with YOLOv5. + +You can either set your credentials through environment variables + +**Environment Variables** + +```shell +export COMET_API_KEY= +export COMET_PROJECT_NAME= # This will default to 'yolov5' +``` + +Or create a `.comet.config` file in your working directory and set your credentials there. + +**Comet Configuration File** + +``` +[comet] +api_key= +project_name= # This will default to 'yolov5' +``` + +## Run the Training Script + +```shell +# Train YOLOv5s on COCO128 for 5 epochs +python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt +``` + +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI + +yolo-ui + +# Try out an Example! +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +Or better yet, try it out yourself in this Colab Notebook + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) + +# Log automatically + +By default, Comet will log the following items + +## Metrics +- Box Loss, Object Loss, Classification Loss for the training and validation data +- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
+- Precision and Recall for the validation data + +## Parameters + +- Model Hyperparameters +- All parameters passed through the command line options + +## Visualizations + +- Confusion Matrix of the model predictions on the validation data +- Plots for the PR and F1 curves across all classes +- Correlogram of the Class Labels + +# Configure Comet Logging + +Comet can be configured to log additional data either through command line flags passed to the training script +or through environment variables. + +```shell +export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online +export COMET_MODEL_NAME= #Set the name for the saved model. Defaults to yolov5 +export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true +export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100. +export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false +export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt' +export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false. +export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions +``` + +## Logging Checkpoints with Comet + +Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the +logged checkpoints to Comet based on the interval value provided by `save-period` + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--save-period 1 +``` + +## Logging Model Predictions + +By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet. 
+ +You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch. + +**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. + +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 2 +``` + +### Controlling the number of Prediction Images logged to Comet + +When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable. + +```shell +env COMET_MAX_IMAGE_UPLOADS=200 python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 1 +``` + +### Logging Class Level Metrics + +Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class. 
+
+```shell
+env COMET_LOG_PER_CLASS_METRICS=true python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt
+```
+
+## Uploading a Dataset to Comet Artifacts
+
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag.
+
+The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--upload_dataset
+```
+
+You can find the uploaded dataset in the Artifacts tab in your Comet Workspace
+artifact-1
+
+You can preview the data directly in the Comet UI.
+artifact-2
+
+Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file
+artifact-3
+
+### Using a saved Artifact
+
+If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
+
+```
+# contents of artifact.yaml file
+path: "comet://<workspace name>/<artifact name>:<artifact version or alias>"
+```
+Then pass this file to your training script in the following way
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data artifact.yaml \
+--weights yolov5s.pt
+```
+
+Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
+artifact-4
+
+## Resuming a Training Run
+
+If your training run is interrupted for any reason, e.g. 
disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
+
+The Run Path has the following format `comet://<your workspace name>/<your project name>/<experiment id>`.
+
+This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI.
+
+```shell
+python train.py \
+--resume "comet://<your run path>"
+```
+
+## Hyperparameter Search with the Comet Optimizer
+
+YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.
+
+### Configuring an Optimizer Sweep
+
+To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`
+
+```shell
+python utils/loggers/comet/hpo.py \
+  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
+```
+
+The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after
+the script.
+
+```shell
+python utils/loggers/comet/hpo.py \
+  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
+  --save-period 1 \
+  --bbox_interval 1
+```
+
+### Running a Sweep in Parallel
+
+```shell
+comet optimizer -j utils/loggers/comet/hpo.py \
+  utils/loggers/comet/optimizer_config.json
+```
+
+### Visualizing Results
+
+Comet provides a number of ways to visualize the results of your sweep. 
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +hyperparameter-yolo diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py new file mode 100644 index 0000000..b0318f8 --- /dev/null +++ b/utils/loggers/comet/__init__.py @@ -0,0 +1,508 @@ +import glob +import json +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +try: + import comet_ml + + # Project Configuration + config = comet_ml.config.get_config() + COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +except (ModuleNotFoundError, ImportError): + comet_ml = None + COMET_PROJECT_NAME = None + +import PIL +import torch +import torchvision.transforms as T +import yaml + +from utils.dataloaders import img2label_paths +from utils.general import check_dataset, scale_boxes, xywh2xyxy +from utils.metrics import box_iou + +COMET_PREFIX = "comet://" + +COMET_MODE = os.getenv("COMET_MODE", "online") + +# Model Saving Settings +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") + +# Dataset Artifact Settings +COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" + +# Evaluation Settings +COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" +COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" +COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) + +# Confusion Matrix Settings +CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) +IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) + +# Batch Logging Settings 
+COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" +COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" + +RANK = int(os.getenv("RANK", -1)) + +to_pil = T.ToPILImage() + + +class CometLogger: + """Log metrics, parameters, source code, models and much more + with Comet + """ + + def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + self.job_type = job_type + self.opt = opt + self.hyp = hyp + + # Comet Flags + self.comet_mode = COMET_MODE + + self.save_model = opt.save_period > -1 + self.model_name = COMET_MODEL_NAME + + # Batch Logging Settings + self.log_batch_metrics = COMET_LOG_BATCH_METRICS + self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL + + # Dataset Artifact Settings + self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.resume = self.opt.resume + + # Default parameters to pass to Experiment objects + self.default_experiment_kwargs = { + "log_code": False, + "log_env_gpu": True, + "log_env_cpu": True, + "project_name": COMET_PROJECT_NAME,} + self.default_experiment_kwargs.update(experiment_kwargs) + self.experiment = self._get_experiment(self.comet_mode, run_id) + + self.data_dict = self.check_dataset(self.opt.data) + self.class_names = self.data_dict["names"] + self.num_classes = self.data_dict["nc"] + + self.logged_images_count = 0 + self.max_images = COMET_MAX_IMAGE_UPLOADS + + if run_id is None: + self.experiment.log_other("Created from", "YOLOv5") + if not isinstance(self.experiment, comet_ml.OfflineExperiment): + workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + self.experiment.log_other( + "Run Path", + f"{workspace}/{project_name}/{experiment_id}", + ) + 
self.log_parameters(vars(opt)) + self.log_parameters(self.opt.hyp) + self.log_asset_data( + self.opt.hyp, + name="hyperparameters.json", + metadata={"type": "hyp-config-file"}, + ) + self.log_asset( + f"{self.opt.save_dir}/opt.yaml", + metadata={"type": "opt-config-file"}, + ) + + self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX + + if hasattr(self.opt, "conf_thres"): + self.conf_thres = self.opt.conf_thres + else: + self.conf_thres = CONF_THRES + if hasattr(self.opt, "iou_thres"): + self.iou_thres = self.opt.iou_thres + else: + self.iou_thres = IOU_THRES + + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + + self.comet_log_predictions = COMET_LOG_PREDICTIONS + if self.opt.bbox_interval == -1: + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 + else: + self.comet_log_prediction_interval = self.opt.bbox_interval + + if self.comet_log_predictions: + self.metadata_dict = {} + self.logged_image_names = [] + + self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS + + self.experiment.log_others({ + "comet_mode": COMET_MODE, + "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, + "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, + "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, + "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, + "comet_model_name": COMET_MODEL_NAME,}) + + # Check if running the Experiment with the Comet Optimizer + if hasattr(self.opt, "comet_optimizer_id"): + self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) + self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) + self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) + self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + + def _get_experiment(self, mode, experiment_id=None): + if mode == "offline": + if experiment_id is not None: + return 
comet_ml.ExistingOfflineExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + + else: + try: + if experiment_id is not None: + return comet_ml.ExistingExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.Experiment(**self.default_experiment_kwargs) + + except ValueError: + logger.warning("COMET WARNING: " + "Comet credentials have not been set. " + "Comet will default to offline logging. " + "Please set your credentials to enable online logging.") + return self._get_experiment("offline", experiment_id) + + return + + def log_metrics(self, log_dict, **kwargs): + self.experiment.log_metrics(log_dict, **kwargs) + + def log_parameters(self, log_dict, **kwargs): + self.experiment.log_parameters(log_dict, **kwargs) + + def log_asset(self, asset_path, **kwargs): + self.experiment.log_asset(asset_path, **kwargs) + + def log_asset_data(self, asset, **kwargs): + self.experiment.log_asset_data(asset, **kwargs) + + def log_image(self, img, **kwargs): + self.experiment.log_image(img, **kwargs) + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + if not self.save_model: + return + + model_metadata = { + "fitness_score": fitness_score[-1], + "epochs_trained": epoch + 1, + "save_period": opt.save_period, + "total_epochs": opt.epochs,} + + model_files = glob.glob(f"{path}/*.pt") + for model_path in model_files: + name = Path(model_path).name + + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + metadata=model_metadata, + overwrite=True, + ) + + def check_dataset(self, data_file): + with open(data_file) as f: + data_config = yaml.safe_load(f) + + if data_config['path'].startswith(COMET_PREFIX): + path = data_config['path'].replace(COMET_PREFIX, "") + data_dict = self.download_dataset_artifact(path) + + return data_dict + + 
self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + + return check_dataset(data_file) + + def log_predictions(self, image, labelsn, path, shape, predn): + if self.logged_images_count >= self.max_images: + return + detections = predn[predn[:, 4] > self.conf_thres] + iou = box_iou(labelsn[:, 1:], detections[:, :4]) + mask, _ = torch.where(iou > self.iou_thres) + if len(mask) == 0: + return + + filtered_detections = detections[mask] + filtered_labels = labelsn[mask] + + image_id = path.split("/")[-1].split(".")[0] + image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) + + metadata = [] + for cls, *xyxy in filtered_labels.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}-gt", + "score": 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + for *xyxy, conf, cls in filtered_detections.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}", + "score": conf * 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + + self.metadata_dict[image_name] = metadata + self.logged_images_count += 1 + + return + + def preprocess_prediction(self, image, labels, shape, pred): + nl, _ = labels.shape[0], pred.shape[0] + + # Predictions + if self.opt.single_cls: + pred[:, 5] = 0 + + predn = pred.clone() + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) + + labelsn = None + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + + return predn, labelsn + + def add_assets_to_artifact(self, artifact, path, asset_path, 
split): + img_paths = sorted(glob.glob(f"{asset_path}/*")) + label_paths = img2label_paths(img_paths) + + for image_file, label_file in zip(img_paths, label_paths): + image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) + + try: + artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + except ValueError as e: + logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') + logger.error(f"COMET ERROR: {e}") + continue + + return artifact + + def upload_dataset_artifact(self): + dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") + path = str((ROOT / Path(self.data_dict["path"])).resolve()) + + metadata = self.data_dict.copy() + for key in ["train", "val", "test"]: + split_path = metadata.get(key) + if split_path is not None: + metadata[key] = split_path.replace(path, "") + + artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + for key in metadata.keys(): + if key in ["train", "val", "test"]: + if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): + continue + + asset_path = self.data_dict.get(key) + if asset_path is not None: + artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) + + self.experiment.log_artifact(artifact) + + return + + def download_dataset_artifact(self, artifact_path): + logged_artifact = self.experiment.get_artifact(artifact_path) + artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) + logged_artifact.download(artifact_save_dir) + + metadata = logged_artifact.metadata + data_dict = metadata.copy() + data_dict["path"] = artifact_save_dir + + metadata_names = metadata.get("names") + if type(metadata_names) == dict: + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + elif type(metadata_names) == list: + data_dict["names"] = 
{int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + else: + raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary" + + data_dict = self.update_data_paths(data_dict) + return data_dict + + def update_data_paths(self, data_dict): + path = data_dict.get("path", "") + + for split in ["train", "val", "test"]: + if data_dict.get(split): + split_path = data_dict.get(split) + data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ + f"{path}/{x}" for x in split_path]) + + return data_dict + + def on_pretrain_routine_end(self, paths): + if self.opt.resume: + return + + for path in paths: + self.log_asset(str(path)) + + if self.upload_dataset: + if not self.resume: + self.upload_dataset_artifact() + + return + + def on_train_start(self): + self.log_parameters(self.hyp) + + def on_train_epoch_start(self): + return + + def on_train_epoch_end(self, epoch): + self.experiment.curr_epoch = epoch + + return + + def on_train_batch_start(self): + return + + def on_train_batch_end(self, log_dict, step): + self.experiment.curr_step = step + if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): + self.log_metrics(log_dict, step=step) + + return + + def on_train_end(self, files, save_dir, last, best, epoch, results): + if self.comet_log_predictions: + curr_epoch = self.experiment.curr_epoch + self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + + for f in files: + self.log_asset(f, metadata={"epoch": epoch}) + self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + + if not self.opt.evolve: + model_path = str(best if best.exists() else last) + name = Path(model_path).name + if self.save_model: + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + overwrite=True, + ) + + # Check if running Experiment with Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + metric = 
results.get(self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_metric_value', metric) + + self.finish_run() + + def on_val_start(self): + return + + def on_val_batch_start(self): + return + + def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): + return + + for si, pred in enumerate(outputs): + if len(pred) == 0: + continue + + image = images[si] + labels = targets[targets[:, 0] == si, 1:] + shape = shapes[si] + path = paths[si] + predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) + if labelsn is not None: + self.log_predictions(image, labelsn, path, shape, predn) + + return + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + if self.comet_log_per_class_metrics: + if self.num_classes > 1: + for i, c in enumerate(ap_class): + class_name = self.class_names[c] + self.experiment.log_metrics( + { + 'mAP@.5': ap50[i], + 'mAP@.5:.95': ap[i], + 'precision': p[i], + 'recall': r[i], + 'f1': f1[i], + 'true_positives': tp[i], + 'false_positives': fp[i], + 'support': nt[c]}, + prefix=class_name) + + if self.comet_log_confusion_matrix: + epoch = self.experiment.curr_epoch + class_names = list(self.class_names.values()) + class_names.append("background") + num_classes = len(class_names) + + self.experiment.log_confusion_matrix( + matrix=confusion_matrix.matrix, + max_categories=num_classes, + labels=class_names, + epoch=epoch, + column_label='Actual Category', + row_label='Predicted Category', + file_name=f"confusion-matrix-epoch-{epoch}.json", + ) + + def on_fit_epoch_end(self, result, epoch): + self.log_metrics(result, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == 
fi) + + def on_params_update(self, params): + self.log_parameters(params) + + def finish_run(self): + self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py new file mode 100644 index 0000000..3cbd451 --- /dev/null +++ b/utils/loggers/comet/comet_utils.py @@ -0,0 +1,150 @@ +import logging +import os +from urllib.parse import urlparse + +try: + import comet_ml +except (ModuleNotFoundError, ImportError): + comet_ml = None + +import yaml + +logger = logging.getLogger(__name__) + +COMET_PREFIX = "comet://" +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") + + +def download_model_checkpoint(opt, experiment): + model_dir = f"{opt.project}/{experiment.name}" + os.makedirs(model_dir, exist_ok=True) + + model_name = COMET_MODEL_NAME + model_asset_list = experiment.get_model_asset_list(model_name) + + if len(model_asset_list) == 0: + logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + return + + model_asset_list = sorted( + model_asset_list, + key=lambda x: x["step"], + reverse=True, + ) + logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + + resource_url = urlparse(opt.weights) + checkpoint_filename = resource_url.query + + if checkpoint_filename: + asset_id = logged_checkpoint_map.get(checkpoint_filename) + else: + asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) + checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME + + if asset_id is None: + logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + return + + try: + logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + asset_filename = checkpoint_filename + + model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + model_download_path = f"{model_dir}/{asset_filename}" + with 
open(model_download_path, "wb") as f: + f.write(model_binary) + + opt.weights = model_download_path + + except Exception as e: + logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.exception(e) + + +def set_opt_parameters(opt, experiment): + """Update the opts Namespace with parameters + from Comet's ExistingExperiment when resuming a run + + Args: + opt (argparse.Namespace): Namespace of command line options + experiment (comet_ml.APIExperiment): Comet API Experiment object + """ + asset_list = experiment.get_asset_list() + resume_string = opt.resume + + for asset in asset_list: + if asset["fileName"] == "opt.yaml": + asset_id = asset["assetId"] + asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + opt_dict = yaml.safe_load(asset_binary) + for key, value in opt_dict.items(): + setattr(opt, key, value) + opt.resume = resume_string + + # Save hyperparameters to YAML file + # Necessary to pass checks in training script + save_dir = f"{opt.project}/{experiment.name}" + os.makedirs(save_dir, exist_ok=True) + + hyp_yaml_path = f"{save_dir}/hyp.yaml" + with open(hyp_yaml_path, "w") as f: + yaml.dump(opt.hyp, f) + opt.hyp = hyp_yaml_path + + +def check_comet_weights(opt): + """Downloads model weights from Comet and updates the + weights path to point to saved weights location + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if weights are successfully downloaded + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.weights, str): + if opt.weights.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.weights) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + download_model_checkpoint(opt, experiment) + return True + + return None + + +def check_comet_resume(opt): + """Restores run parameters to its original state based on the model 
checkpoint + and logged Experiment parameters. + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if the run is restored successfully + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.resume, str): + if opt.resume.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.resume) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + set_opt_parameters(opt, experiment) + download_model_checkpoint(opt, experiment) + + return True + + return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py new file mode 100644 index 0000000..7dd5c92 --- /dev/null +++ b/utils/loggers/comet/hpo.py @@ -0,0 +1,118 @@ +import argparse +import json +import logging +import os +import sys +from pathlib import Path + +import comet_ml + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + +# Project Configuration +config = comet_ml.config.get_config() +COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + + +def get_args(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300, help='total training 
epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + 
parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + # Comet Arguments + parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") + parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") + parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") + parser.add_argument("--comet_optimizer_workers", + type=int, + default=1, + help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def run(parameters, opt): + hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.batch_size = parameters.get("batch_size") + opt.epochs = parameters.get("epochs") + + device = select_device(opt.device, batch_size=opt.batch_size) + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + opt = get_args(known=True) + + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.project = str(opt.project) + + optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + if optimizer_id is None: + with open(opt.comet_optimizer_config) as f: + optimizer_config = json.load(f) + optimizer = comet_ml.Optimizer(optimizer_config) + else: + optimizer = comet_ml.Optimizer(optimizer_id) + + opt.comet_optimizer_id = optimizer.id + status = optimizer.status() + + opt.comet_optimizer_objective = status["spec"]["objective"] + opt.comet_optimizer_metric = status["spec"]["metric"] + + logger.info("COMET INFO: Starting Hyperparameter Sweep") + for parameter in optimizer.get_parameters(): + run(parameter["parameters"], 
opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json new file mode 100644 index 0000000..83dddda --- /dev/null +++ b/utils/loggers/comet/optimizer_config.json @@ -0,0 +1,209 @@ +{ + "algorithm": "random", + "parameters": { + "anchor_t": { + "type": "discrete", + "values": [ + 2, + 8 + ] + }, + "batch_size": { + "type": "discrete", + "values": [ + 16, + 32, + 64 + ] + }, + "box": { + "type": "discrete", + "values": [ + 0.02, + 0.2 + ] + }, + "cls": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "cls_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "copy_paste": { + "type": "discrete", + "values": [ + 1 + ] + }, + "degrees": { + "type": "discrete", + "values": [ + 0, + 45 + ] + }, + "epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "fl_gamma": { + "type": "discrete", + "values": [ + 0 + ] + }, + "fliplr": { + "type": "discrete", + "values": [ + 0 + ] + }, + "flipud": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_h": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_s": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_v": { + "type": "discrete", + "values": [ + 0 + ] + }, + "iou_t": { + "type": "discrete", + "values": [ + 0.7 + ] + }, + "lr0": { + "type": "discrete", + "values": [ + 1e-05, + 0.1 + ] + }, + "lrf": { + "type": "discrete", + "values": [ + 0.01, + 1 + ] + }, + "mixup": { + "type": "discrete", + "values": [ + 1 + ] + }, + "momentum": { + "type": "discrete", + "values": [ + 0.6 + ] + }, + "mosaic": { + "type": "discrete", + "values": [ + 0 + ] + }, + "obj": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "obj_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "optimizer": { + "type": "categorical", + "values": [ + "SGD", + "Adam", + "AdamW" + ] + }, + "perspective": { + "type": "discrete", + "values": [ + 0 + ] + }, + "scale": { + "type": "discrete", + "values": [ + 0 + ] + }, + "shear": { + "type": "discrete", + "values": [ + 0 + 
] + }, + "translate": { + "type": "discrete", + "values": [ + 0 + ] + }, + "warmup_bias_lr": { + "type": "discrete", + "values": [ + 0, + 0.2 + ] + }, + "warmup_epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "warmup_momentum": { + "type": "discrete", + "values": [ + 0, + 0.95 + ] + }, + "weight_decay": { + "type": "discrete", + "values": [ + 0, + 0.001 + ] + } + }, + "spec": { + "maxCombo": 0, + "metric": "metrics/mAP_0.5", + "objective": "maximize" + }, + "trials": 1 +} diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md new file mode 100644 index 0000000..d78324b --- /dev/null +++ b/utils/loggers/wandb/README.md @@ -0,0 +1,162 @@ +📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. + +- [About Weights & Biases](#about-weights-&-biases) +- [First-Time Setup](#first-time-setup) +- [Viewing runs](#viewing-runs) +- [Disabling wandb](#disabling-wandb) +- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) +- [Reports: Share your work with the world!](#reports) + +## About Weights & Biases + +Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. + +Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. 
How W&B can help you optimize your machine learning workflows: + +- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time +- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically +- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization +- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators +- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently +- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models + +## First-Time Setup + +

+ Toggle Details +When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. + +W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: + +```shell +$ python train.py --project ... --name ... +``` + +YOLOv5 notebook example: Open In Colab Open In Kaggle +Screen Shot 2021-09-29 at 10 23 13 PM + +
+ +## Viewing Runs + +
+ Toggle Details +Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged: + +- Training & Validation losses +- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 +- Learning Rate over time +- A bounding box debugging panel, showing the training progress over time +- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** +- System: Disk I/O, CPU utilization, RAM memory usage +- Your trained model as W&B Artifact +- Environment: OS and Python types, Git repository and state, **training command** +

Weights & Biases dashboard

+
+ +## Disabling wandb + +- training after running `wandb disabled` inside that directory creates no wandb run + ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) + +- To enable wandb again, run `wandb online` + ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) + +## Advanced Usage + +You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. + +
+

1: Train and Log Evaluation simultaneously

+ This is an extension of the previous section, but it'll also train after uploading the dataset. This also logs the evaluation Table. + Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, + so no images will be uploaded from your system more than once. +
+ Usage + Code $ python train.py --upload_data val + +![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) + +
+ +

2: Visualize and Version Datasets

+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. +
+ Usage + Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. + +![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) + +
+ +

3: Train using dataset artifact

+ When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information that + can be used to train a model directly from the dataset artifact. This also logs the evaluation. +
+ Usage + Code $ python train.py --data {data}_wandb.yaml + +![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) + +
+ +

4: Save model checkpoints as artifacts

+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. + You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged. + +
+ Usage + Code $ python train.py --save_period 1 + +![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) + +
+ +
+ +

5: Resume runs from checkpoint artifacts.

+Any run can be resumed using artifacts if the --resume argument starts with wandb-artifact:// prefix followed by the run path, i.e, wandb-artifact://username/project/runid . This doesn't require the model checkpoint to be present on the local system. + +
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) + +
+ +

6: Resume runs from dataset artifact & checkpoint artifacts.

+ Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. + The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or + train from the _wandb.yaml file and set --save_period + +
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) + +
+ + + +

Reports

+W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). + +Weights & Biases Reports + +## Environments + +YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + +- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + +## Status + +![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) + +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py new file mode 100644 index 0000000..06e81fb --- /dev/null +++ b/utils/loggers/wandb/log_dataset.py @@ -0,0 +1,27 @@ +import argparse + +from wandb_utils import WandbLogger + +from utils.general import LOGGER + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') + + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py new file mode 100644 index 0000000..d49ea6f --- /dev/null +++ b/utils/loggers/wandb/sweep.py @@ -0,0 +1,41 @@ +import sys +from pathlib import Path + +import wandb + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + + +def sweep(): + wandb.init() + # Get hyp dict from sweep agent. 
Copy because train() modifies parameters which confused wandb. + hyp_dict = vars(wandb.config).get("_items").copy() + + # Workaround: get necessary opt args + opt = parse_opt(known=True) + opt.batch_size = hyp_dict.get("batch_size") + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.epochs = hyp_dict.get("epochs") + opt.nosave = True + opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) + device = select_device(opt.device, batch_size=opt.batch_size) + + # train + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml new file mode 100644 index 0000000..688b1ea --- /dev/null +++ b/utils/loggers/wandb/sweep.yaml @@ -0,0 +1,143 @@ +# Hyperparameters for training +# To set range- +# Provide min and max values as: +# parameter: +# +# min: scalar +# max: scalar +# OR +# +# Set a specific list of search space- +# parameter: +# values: [scalar1, scalar2, scalar3...] 
+# +# You can use grid, bayesian and hyperopt search strategy +# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration + +program: utils/loggers/wandb/sweep.py +method: random +metric: + name: metrics/mAP_0.5 + goal: maximize + +parameters: + # hyperparameters: set either min, max range or values list + data: + value: "data/coco128.yaml" + batch_size: + values: [64] + epochs: + values: [10] + + lr0: + distribution: uniform + min: 1e-5 + max: 1e-1 + lrf: + distribution: uniform + min: 0.01 + max: 1.0 + momentum: + distribution: uniform + min: 0.6 + max: 0.98 + weight_decay: + distribution: uniform + min: 0.0 + max: 0.001 + warmup_epochs: + distribution: uniform + min: 0.0 + max: 5.0 + warmup_momentum: + distribution: uniform + min: 0.0 + max: 0.95 + warmup_bias_lr: + distribution: uniform + min: 0.0 + max: 0.2 + box: + distribution: uniform + min: 0.02 + max: 0.2 + cls: + distribution: uniform + min: 0.2 + max: 4.0 + cls_pw: + distribution: uniform + min: 0.5 + max: 2.0 + obj: + distribution: uniform + min: 0.2 + max: 4.0 + obj_pw: + distribution: uniform + min: 0.5 + max: 2.0 + iou_t: + distribution: uniform + min: 0.1 + max: 0.7 + anchor_t: + distribution: uniform + min: 2.0 + max: 8.0 + fl_gamma: + distribution: uniform + min: 0.0 + max: 4.0 + hsv_h: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_s: + distribution: uniform + min: 0.0 + max: 0.9 + hsv_v: + distribution: uniform + min: 0.0 + max: 0.9 + degrees: + distribution: uniform + min: 0.0 + max: 45.0 + translate: + distribution: uniform + min: 0.0 + max: 0.9 + scale: + distribution: uniform + min: 0.0 + max: 0.9 + shear: + distribution: uniform + min: 0.0 + max: 10.0 + perspective: + distribution: uniform + min: 0.0 + max: 0.001 + flipud: + distribution: uniform + min: 0.0 + max: 1.0 + fliplr: + distribution: uniform + min: 0.0 + max: 1.0 + mosaic: + distribution: uniform + min: 0.0 + max: 1.0 + mixup: + distribution: uniform + min: 0.0 + max: 1.0 + copy_paste: + 
distribution: uniform + min: 0.0 + max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py new file mode 100644 index 0000000..238f4ed --- /dev/null +++ b/utils/loggers/wandb/wandb_utils.py @@ -0,0 +1,589 @@ +"""Utilities and tools for tracking runs with Weights & Biases.""" + +import logging +import os +import sys +from contextlib import contextmanager +from pathlib import Path +from typing import Dict + +import yaml +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from utils.dataloaders import LoadImagesAndLabels, img2label_paths +from utils.general import LOGGER, check_dataset, check_file + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + +RANK = int(os.getenv('RANK', -1)) +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def check_wandb_dataset(data_file): + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False + if isinstance(data_file, dict): + # In that case another dataset manager has already processed it and we don't have to + return data_file + if check_file(data_file) and data_file.endswith('.yaml'): + with open(data_file, errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_trainset_wandb_artifact = isinstance(data_dict['train'], + str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) + is_valset_wandb_artifact = isinstance(data_dict['val'], + str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) + if 
is_trainset_wandb_artifact or is_valset_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + entity = run_path.parent.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return entity, project, run_id, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if RANK not in [-1, 0]: # For resuming DDP runs + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(check_file(opt.data), errors='ignore') as f: + data_dict = yaml.safe_load(f) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.safe_dump(data_dict, f) + opt.data = ddp_data_path + + +class 
WandbLogger(): + """Log training runs, datasets, models, and predictions to Weights & Biases. + + This logger sends information to W&B at wandb.ai. By default, this information + includes hyperparameters, system configuration and metrics, model metrics, + and basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + + For more on how this logger is used, see the Weights & Biases documentation: + https://docs.wandb.com/guides/integrations/yolov5 + """ + + def __init__(self, opt, run_id=None, job_type='Training'): + """ + - Initialize WandbLogger instance + - Upload dataset if opt.upload_dataset is True + - Setup training processes if job_type is 'Training' + + arguments: + opt (namespace) -- Commandline arguments for this run + run_id (str) -- Run ID of W&B run to be resumed + job_type (str) -- To set the job_type for this run + + """ + # Temporary-fix + if opt.upload_dataset: + opt.upload_dataset = False + # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.val_artifact, self.train_artifact = None, None + self.train_artifact_path, self.val_artifact_path = None, None + self.result_artifact = None + self.val_table, self.result_table = None, None + self.bbox_media_panel_images = [] + self.val_table_path_map = None + self.max_imgs_to_log = 16 + self.wandb_artifact_data_dict = None + self.data_dict = None + # It's more elegant to stick to 1 wandb.init call, + # but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert 
wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, + project=project, + entity=entity, + resume='allow', + allow_val_change=True) + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + entity=opt.entity, + name=opt.name if opt.name != 'exp' else None, + job_type=job_type, + id=run_id, + allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if opt.upload_dataset: + if not opt.resume: + self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + + if isinstance(opt.data, dict): + # This means another dataset manager has already processed the dataset info (e.g. ClearML) + # and they will have stored the already processed dict in opt.data + self.data_dict = opt.data + elif opt.resume: + # resume from artifact + if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + self.data_dict = dict(self.wandb_run.config.data_dict) + else: # local resume + self.data_dict = check_wandb_dataset(opt.data) + else: + self.data_dict = check_wandb_dataset(opt.data) + self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict + + # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. 
+ self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) + self.setup_training(opt) + + if self.job_type == 'Dataset Creation': + self.wandb_run.config.update({"upload_dataset": True}) + self.data_dict = self.check_and_upload_dataset(opt) + + def check_and_upload_dataset(self, opt): + """ + Check if the dataset format is compatible and upload it as W&B artifact + + arguments: + opt (namespace)-- Commandline arguments for current run + + returns: + Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. + """ + assert wandb, 'Install wandb to upload dataset' + config_path = self.log_dataset_artifact(opt.data, opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + with open(config_path, errors='ignore') as f: + wandb_data_dict = yaml.safe_load(f) + return wandb_data_dict + + def setup_training(self, opt): + """ + Setup the necessary processes for training YOLO models: + - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX + - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded + - Setup log_dict, initialize bbox_interval + + arguments: + opt (namespace) -- commandline arguments for this run + + """ + self.log_dict, self.current_epoch = {}, 0 + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + config.hyp, config.imgsz + data_dict = self.data_dict + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, 
self.train_artifact = self.download_dataset_artifact( + data_dict.get('train'), opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( + data_dict.get('val'), opt.artifact_alias) + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + if opt.evolve or opt.noplots: + self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval + train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None + # Update the the data_dict to point to local artifacts dir + if train_from_artifact: + self.data_dict = data_dict + + def download_dataset_artifact(self, path, alias): + """ + download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX + + arguments: + path -- path of the dataset to be used for training + alias (str)-- alias of the artifact to be download/used for training + + returns: + (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset + is found otherwise returns (None, None) + """ + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + dataset_artifact = 
wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + """ + download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX + + arguments: + opt (namespace) -- Commandline arguments for this run + """ + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + # epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + is_finished = total_epochs is None + assert not is_finished, 'training is finished, can only resume incomplete runs.' + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + """ + Log the model checkpoint as W&B artifact + + arguments: + path (Path) -- Path of directory containing the checkpoints + opt (namespace) -- Command line arguments for this run + epoch (int) -- Current epoch number + fitness_score (float) -- fitness score for current epoch + best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
+ """ + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', + type='model', + metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score}) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + """ + Log the dataset as W&B artifact and return the new data file with W&B links + + arguments: + data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. + single_class (boolean) -- train multi-class data as single-class + project (str) -- project name. Used to construct the artifact path + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + file with _wandb postfix. Eg -> data_wandb.yaml + + returns: + the new .yaml file with artifact links. 
it can be used to start training directly from artifacts + """ + upload_dataset = self.wandb_run.config.upload_dataset + log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' + self.data_dict = check_dataset(data_file) # parse and check + data = dict(self.data_dict) + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + + # log train set + if not log_val_only: + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), + names, + name='train') if data.get('train') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + + self.val_artifact = self.create_dataset_table( + LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + + path = Path(data_file) + # create a _wandb.yaml file with artifacts links if both train and test set are logged + if not log_val_only: + path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path + path = ROOT / 'data' / path + data.pop('download', None) + data.pop('path', None) + with open(path, 'w') as f: + yaml.safe_dump(data, f) + LOGGER.info(f"Created dataset config file {path}") + + if self.job_type == 'Training': # builds correct artifact pipeline graph + if not log_val_only: + self.wandb_run.log_artifact( + self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
+ self.wandb_run.use_artifact(self.val_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + """ + Map the validation dataset Table like name of file -> it's id in the W&B Table. + Useful for - referencing artifacts for evaluation. + """ + self.val_table_path_map = {} + LOGGER.info("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_path_map[data[3]] = data[0] + + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): + """ + Create and return W&B artifact containing W&B Table of the dataset. + + arguments: + dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id -- hash map that maps class ids to labels + name -- name of the artifact + + returns: + dataset artifact to be logged or used + """ + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.im_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), name='data/labels/' + + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 
'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + box_data, img_classes = [], {} + for cls, *xywh in labels[:, 1:].tolist(): + cls = int(cls) + box_data.append({ + "position": { + "middle": [xywh[0], xywh[1]], + "width": xywh[2], + "height": xywh[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls])}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + """ + Build evaluation Table. Uses reference from validation dataset table. + + arguments: + predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + names (dict(int, str)): hash map that maps class ids to labels + """ + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + avg_conf_per_class = [0] * len(self.data_dict['names']) + pred_class_count = {} + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + cls = int(cls) + box_data.append({ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": f"{names[cls]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"}) + avg_conf_per_class[cls] += conf + + if cls in pred_class_count: + pred_class_count[cls] += 1 + else: + pred_class_count[cls] = 1 + + for pred_class in pred_class_count.keys(): + avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] + + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_path_map[Path(path).name] 
+ self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + *avg_conf_per_class) + + def val_one_image(self, pred, predn, path, names, im): + """ + Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel + + arguments: + pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + """ + if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact + self.log_training_progress(predn, path, names) + + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": f"{names[int(cls)]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + def log(self, log_dict): + """ + save the metrics to the logging dictionary + + arguments: + log_dict (Dict) -- metrics/media to be logged in current step + """ + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + """ + commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
+ + arguments: + best_result (boolean): Boolean representing if the result of this evaluation is best or not + """ + if self.wandb_run: + with all_logging_disabled(): + if self.bbox_media_panel_images: + self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images + try: + wandb.log(self.log_dict) + except BaseException as e: + LOGGER.info( + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + ) + self.wandb_run.finish() + self.wandb_run = None + + self.log_dict = {} + self.bbox_media_panel_images = [] + if self.result_artifact: + self.result_artifact.add(self.result_table, 'result') + wandb.log_artifact(self.result_artifact, + aliases=[ + 'latest', 'last', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + + wandb.log({"evaluation": self.result_table}) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + """ + Log metrics if any and finish the current W&B run + """ + if self.wandb_run: + if self.log_dict: + with all_logging_disabled(): + wandb.log(self.log_dict) + wandb.run.finish() + + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ source - https://gist.github.com/simon-weber/7853144 + A context manager that will prevent any logging messages triggered during the body from being processed. + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL is defined. 
+ """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000..9b9c3d9 --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,234 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" + +import torch +import torch.nn as nn + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + sort_obj_iou = False + + # Compute losses + def __init__(self, model, autobalance=False): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + + def __call__(self, p, 
targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions + + # Regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= 
self.hyp['cls'] + bs = tobj.shape[0] # batch size + + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + 
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..ed611d7 --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,368 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from utils import TryExcept, threaded + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def smooth(y, f=0.05): + # Box filter of fraction f + nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) + p = np.ones(nf // 2) # ones padding + yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded + return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - 
tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + if detections is None: + gt_classes = labels.int() + for gc in gt_classes: + self.matrix[self.nc, gc] += 1 # background FN + return + + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(int) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # true background + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # predicted background + + def matrix(self): + return self.matrix + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') + def plot(self, normalize=True, save_dir='', names=()): + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig, ax = plt.subplots(1, 1, figsize=(12, 
9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (names + ['background']) if labels else "auto" + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, + ax=ax, + annot=nc < 30, + annot_kws={ + "size": 8}, + cmap='Blues', + fmt='.2f', + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) + ax.set_ylabel('True') + ax.set_ylabel('Predicted') + ax.set_title('Confusion Matrix') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close(fig) + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, 
b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def box_area(box): + # box = xyxy(4,n) + return (box[2] - box[0]) * (box[3] - box[1]) + + +def box_iou(box1, box2, eps=1e-7): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) + + +def bbox_ioa(box1, box2, eps=1e-7): + """ Returns the intersection over box2 area given box1, box2. 
Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1 + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2, eps=1e-7): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + + +@threaded +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +@threaded +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), 
tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py new file mode 100644 index 0000000..36df271 --- /dev/null +++ b/utils/plots.py @@ -0,0 +1,575 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" + +import contextlib +import math +import os +from copy import copy +from pathlib import Path +from urllib.error import URLError + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils import TryExcept, threaded +from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, + is_ascii, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness +from utils.segment.general import scale_image + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c 
in hexs] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + try: + check_font(font) + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() + + +class Annotator: + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + + def masks(self, masks, colors, im_gpu=None, alpha=0.5): + """Plot masks at once. 
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if im_gpu is None: + # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) + if len(masks) == 0: + return + if isinstance(masks, torch.Tensor): + masks = torch.as_tensor(masks, dtype=torch.uint8) + masks = masks.permute(1, 2, 0).contiguous() + masks = masks.cpu().numpy() + # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) + masks = scale_image(masks.shape[:2], masks, self.im.shape) + masks = np.asarray(masks, dtype=np.float32) + colors = np.asarray(colors, dtype=np.float32) # shape(n,3) + s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together + masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) + self.im[:] = masks * alpha + self.im * (1 - s * alpha) + else: + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # 
Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): + # Add text to image (PIL-only) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + self.draw.text(xy, text, fill=txt_color, font=self.font) + + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... 
({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting + targets = [] + for i, o in enumerate(output): + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + return torch.cat(targets, 0).numpy() + + +@threaded +def plot_images(images, targets, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + 
annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 
4), tight_layout=True) + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[5, 1:j], + y[3, 1:j] * 1E2, + '.-', + linewidth=2, + markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', + linewidth=2, + markersize=8, + alpha=.25, + label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(25, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) + + +@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 +def plot_labels(labels, names=(), save_dir=Path('')): + # plot dataset labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + with contextlib.suppress(Exception): # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): + # Show classification image grid with labels (optional) and predictions (optional) + from utils.augmentations import denormalize + + names = names or [f'class{i}' for i in range(1000)] + blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), + dim=0) # 
select batch index 0, block by channels + n = min(len(blocks), nmax) # number of plots + m = min(8, round(n ** 0.5)) # 8 x 8 default + fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols + ax = ax.ravel() if m > 1 else [ax] + # plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) + ax[i].axis('off') + if labels is not None: + s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') + ax[i].set_title(s, fontsize=8, verticalalignment='top') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + if verbose: + LOGGER.info(f"Saving {f}") + if labels is not None: + LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + if pred is not None: + LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + return f + + +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. 
Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for f in files: + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j].astype('float') + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_boxes(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB + return crop diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/segment/__pycache__/__init__.cpython-38.pyc b/utils/segment/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4300726de064d4bbbbb1a6f9a68b5c34623e40d GIT binary patch literal 173 zcmWIL<>g`k0$U;XWDxxrL?8o3AjbiSi&=m~3PUi1CZpd8?fsY2+QSWCP#1)g9aM2(f?-)v801?IH1*&%11``0_PPmGw1UeT~S}7-x~S zR^rUpeEm6Fvwg!i@h$sj{E}}!=WC8X>z928xr%?*pYo@XoAS^3r~EgNoA%H9U-4&< zJLU67tU7mqQCYpI(P#Wr@U`$R<4r$BlCur=R3B&qePCpKsO8+p?}au=TlxhxpOOC5 z7?=ZVP#V}7-_UadnnrGJn7;9`PSS-2$q&j(LO&@%{VEA3XL&g{H%+9aO*YGNC$~0D z^|h%y%di5|o8yc*E}r>ik@V)>o(TM~nFYQZdFf{AuJ5{gfk?u*?I!)q4YmTYn{}ud zCz%_@Zq^BoD6Ow2JBzBn$3ZhoM7W2Z(8qEofy$Mv7I+Rk;U?c&|3;aihjNZx$O#93(l;}=Vv zBnp<=Jul6??$Tn@Yj%PqHPlk4zrNJ(X2R?4W??gJ>?Yl0>#YVx$pW#|&%$oHlm_i6 zh_j`35C;%k?Cr{O6BZ_^%@RgP)yZWL&v8o=O9_X(|`;fK+JLO=t^P&M1nR2Ps@ zt4tvY%TI)#0X%M)x^X{RNADB(j0cOOR4EfQg0beC|Cx${!Obx4VcanC+ChT=yWn0S z#8wNCiy-TZ_@+Asu^Q^&pLpuhUJtur9C%{or6!Fi@dG*Sb;EYtXeI*2O9cV&i^43Q zzg@KjSw)(yZsKLq*b0MfSxUopl!U(IE3yI%2cnk%(}A+8)Z1C3YD-IPVJdCfk+c!U z(kfP~Iux_is}^CDCN!=+Hf3x8PFzLc5-KE`;G#1 zW$$l?fL;$kD#8CfqQx96TUU5JAg6dh{#mGmV6q}EQu(V$s^tQ7U!#hzQ{o~4iFOk} 
zp14SSgOYhlzKNtdCB8-ZZ&UIeO4KQ^Cooj-72lIy0V z>;-m-N`OiZ=u!!gf}cOE$&Fby$9zM{5qB!B!`hZMFk_n-6>u{%ur1~GxynI)pKFX_ z2$YLaRr_2aQH*2KJg5eeICL2zk_~;I`^F{{f5^bolbYYK-)j5(BaUNSJI}}4GN#L zg2kkCyjTf<lg2Q_WMu%{-?d$O*)p7hp3v~WxVOn zkYw=gh#(QKj@U>W0?Q*lK)g9Ld~L+x&-H?QflxlFyTHx^^>Yk}rs!2W4}8PU#@=Vj z&Sa76ut;46anW@*3cUEJJ3b6Oll2WmkA*`fwHnkO6l%2-YVFKDDql~s;3hnM(R<;K zx7{7L$aRjpV}%koO!~kX=JlWei6StCk$Tk>6f8+A^0KJkl{_PkLRcd8v>&}Ri`edZ z@u^8Y=%sQ>Ip7p42$NWv{TQFuO{aWt0|T$qjuUY(`y4_sL7RWVG!&L8)7(Oa%reV{ zxa^Ghkdrx-VN@B$P~Y>#QU5Yy{O-SDjIj;>XU4dK0jg@E#ZjdQs9x0sZG!j-C3h)# zjl;{zoV2S6TF1LO3*r_(M_HD-o@fVYcI>`JsHaF*ZSN<@wAnxd&KaUv72o2k(0`Xb z+Z*VaF*I|e8x~%KB*dk_J5I!=CRQ_hST63Axp5Z^3)2{J4XqYhpQ9Dw1Esia;@&xt z8CFIJVPMQD3MIgzI6K4X+`za9B}SaH4=?5#PEw@~Gyz;cPym%yGl`pC)}+muJmhr= zO^@&@57q`=o}DJj1u_U5+OM?_4Rod{XDx^Xt6Y{qSH!mnLAAwelJC5}x6Z9@h^Wq6QI=a<4B<#QwMlS-2?S3Mby8hN)H< z`^!n3C4G^~>D7DhuQu*|@czBk`knjo%=~V|DM$1o~#sm zAH)celjly&(3%9}X4m6xX#ZXzwCd+{{w3iiRR=IleA=>iCxC zkW2CJ2V)w=9_J8|(0i57a6$A@0Qdi4#pNbwkX@q`j$v$Zqo9=RCD zi>-dFXv*tiS+R5u%SHU9A~6c*Rm7t_h@vQUqSWX|jG~sIt_t=oc=CQXxJ}qh$-p*d TZ1Wl2p0Q_~OU`9y+L`_r&2xEZ literal 0 HcmV?d00001 diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py new file mode 100644 index 0000000..169adde --- /dev/null +++ b/utils/segment/augmentations.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + 
degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform 
label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py new file mode 100644 index 0000000..a63d6ec --- /dev/null +++ b/utils/segment/dataloaders.py @@ -0,0 +1,330 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders +""" + +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + +RANK = int(os.getenv('RANK', -1)) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, 
+ augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, prefix) + self.downsample_ratio = downsample_ratio + self.overlap = overlap + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp["mixup"]: 
+ img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. 
+ img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + + # Flip up-down + if random.random() < hyp["flipud"]: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + + # Flip left-right + if random.random() < hyp["fliplr"]: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + + # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, 
h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels, segments = self.labels[index].copy(), self.segments[index].copy() + + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border) # border to remove + return img4, labels4, segments4 + + @staticmethod + def collate_fn(batch): + img, label, path, shapes, masks = zip(*batch) # transposed + batched_masks = torch.cat(masks, 0) + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks + + +def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). 
+ """ + mask = np.zeros(img_size, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(img_size, polygons, color, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (list[np.ndarray]): each polygon is [N, M], + N is the number of polygons, + M is the number of points(Be divided by 2). + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(img_size, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask( + img_size, + [segments[si].reshape(-1)], + downsample_ratio=downsample_ratio, + color=1, + ) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index diff --git a/utils/segment/general.py b/utils/segment/general.py new file mode 100644 index 0000000..43bdc46 --- /dev/null +++ b/utils/segment/general.py @@ -0,0 +1,134 @@ +import cv2 +import numpy as np +import torch +import torch.nn.functional as F + + +def crop_mask(masks, boxes): + """ + "Crop" predicted masks by zeroing out everything not in the predicted bbox. 
+ Vectorized by Chong (thanks Chong). + + Args: + - masks should be a size [h, w, n] tensor of masks + - boxes should be a size [n, 4] tensor of bbox coords in relative point form + """ + + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + Crop after upsample. + proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Crop before upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + img1_shape: model input shape, [h, w] + img0_shape: origin pic shape, [h, w, 3] + masks: [h, w, num] + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means 
image_w x image_h + + return: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + segments.append(c.astype('float32')) + return segments diff --git a/utils/segment/loss.py b/utils/segment/loss.py new file mode 100644 index 0000000..b45b2c2 --- /dev/null +++ b/utils/segment/loss.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..general import xywh2xyxy +from ..loss import FocalLoss, smooth_BCE +from ..metrics import bbox_iou +from ..torch_utils import de_parallel +from .general import crop_mask + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False, overlap=False): + self.sort_obj_iou = False + self.overlap = overlap + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + self.device = device + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + 
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # 
Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input 
targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + 
gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py new file mode 100644 index 0000000..b09ce23 --- /dev/null +++ b/utils/segment/metrics.py @@ -0,0 +1,210 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. + """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. 
+ Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return 
self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + "x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py new file mode 100644 index 0000000..9b90900 --- /dev/null +++ b/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else 
f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
+ for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + else: + # last + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f"Warning: Plotting error for {f}: {e}") + ax[1].legend() + fig.savefig(save_dir / "results.png", dpi=200) + plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000..9f257d0 --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,430 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" + +import math +import os +import platform +import subprocess +import time +import warnings +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP + +from utils.general import LOGGER, check_version, colorstr, file_date, git_describe + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + +# Suppress PyTorch 
warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') + + +def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): + # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator + def decorate(fn): + return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) + + return decorate + + +def smartCrossEntropyLoss(label_smoothing=0.0): + # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + if check_version(torch.__version__, '1.10.0'): + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) + if label_smoothing > 0: + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() + + +def smart_DDP(model): + # Model DDP creation with checks + assert not check_version(torch.__version__, '1.12.0', pinned=True), \ + 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ + 'Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395' + if check_version(torch.__version__, '1.11.0'): + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) + else: + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + +def reshape_classifier_output(model, n=1000): + # Update a TorchVision classification model to class count 'n' if required + from models.common import Classify + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLOv5 Classify() head + if m.linear.out_features != n: + m.linear = nn.Linear(m.linear.in_features, n) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != n: + setattr(model, name, nn.Linear(m.in_features, n)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != n: + m[i] = nn.Linear(m[i].in_features, n) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != n: + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + # Decorator to make all processes in distributed training wait for each local_master to do something + if local_rank not in [-1, 0]: + dist.barrier(device_ids=[local_rank]) + yield + if local_rank == 0: + dist.barrier(device_ids=[0]) + + +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
Supports Linux and Windows + assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' + try: + cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 + + +def select_device(device='', batch_size=0, newline=True): + # device = None or 'cpu' or 0 or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' + device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' + mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + if cpu or mps: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB + arg = 'cuda:0' + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available + s += 'MPS\n' + arg = 'mps' + else: # revert to CPU + s += 'CPU\n' + arg = 'cpu' + + if not newline: + s = s.rstrip() + LOGGER.info(s) + return torch.device(arg) + + +def time_sync(): + # PyTorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(input, ops, n=10, device=None): + """ YOLOv5 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ + results = [] + if not isinstance(device, torch.device): + device = select_device(device) + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] 
= time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # 
prune + prune.remove(m, 'weight') # make permanent + LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity') + + +def fuse_conv_and_bn(conv, bn): + # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # Prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, imgsz=640): + # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPs + p = next(model.parameters()) + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs + except Exception: + fs = '' + + name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] 
+ for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): + # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) + g[2].append(v.bias) + if isinstance(v, bn): # weight (no decay) + g[1].append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g[0].append(v.weight) + + if name == 'Adam': + optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum + elif name == 'AdamW': + optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) + elif name == 'RMSProp': + optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + else: + raise NotImplementedError(f'Optimizer {name} not implemented.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " + f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + return optimizer + + +def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): + # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + if check_version(torch.__version__, '1.9.1'): + kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors + if 
check_version(torch.__version__, '1.12.0'): + kwargs['trust_repo'] = True # argument required starting in torch 0.12 + try: + return torch.hub.load(repo, model, **kwargs) + except Exception: + return torch.hub.load(repo, model, force_reload=True, **kwargs) + + +def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): + # Resume training from a partially trained checkpoint + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + ema.updates = ckpt['updates'] + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" + LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs + return best_fitness, start_epoch, epochs + + +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. 
mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + +class ModelEMA: + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + """ + + def __init__(self, model, decay=0.9999, tau=2000, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: # true for FP16 and FP32 + v *= d + v += (1 - d) * msd[k].detach() + # assert v.dtype == 
msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/utils/triton.py b/utils/triton.py new file mode 100644 index 0000000..a94ef0a --- /dev/null +++ b/utils/triton.py @@ -0,0 +1,85 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" Utils to interact with the Triton Inference Server +""" + +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == "grpc": + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], 
i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get("backend", self.metadata.get("platform")) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. + kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError("No inputs provided.") + if args_len and kwargs_len: + raise RuntimeError("Cannot specify args and kwargs at the same time") + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders From 255ffcb60529c32613e655b086ce01a744bba17c Mon Sep 17 00:00:00 2001 From: Giannis Pastaltzidis Date: Sat, 29 Oct 2022 01:59:46 +0300 Subject: [PATCH 2/4] [Fix]: Script improvement --- SimpleHigherHRNet.py | 130 +++++++++++++++++-------------------------- export.py | 8 +-- misc/utils.py | 6 +- scripts/live-demo.py | 18 +++--- utils/dataloaders.py | 4 +- utils/general.py | 4 +- 6 files changed, 72 insertions(+), 98 
deletions(-) diff --git a/SimpleHigherHRNet.py b/SimpleHigherHRNet.py index a11a2ec..7eced8a 100644 --- a/SimpleHigherHRNet.py +++ b/SimpleHigherHRNet.py @@ -107,7 +107,7 @@ def __init__(self, max_nof_people=30, max_batch_size=32, device=torch.device("cpu"), - trt_=True): + trt_=False): """ Initializes a new SimpleHigherHRNet object. HigherHRNet is initialized on the torch.device("device") and @@ -168,60 +168,54 @@ def __init__(self, else: raise ValueError('Wrong model name.') - checkpoint = torch.load(checkpoint_path, map_location=self.device) - if 'model' in checkpoint: - checkpoint = checkpoint['model'] - # fix issue with official high-resolution weights - checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' else k, v) for k, v in checkpoint.items()]) - self.model.load_state_dict(checkpoint) - # if True: - # import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - # # check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - # if device.type == 'cpu': - # device = torch.device('cuda:0') - # Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - # # logger = trt.Logger(trt.Logger.INFO) - # with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - # model = runtime.deserialize_cuda_engine(f.read()) - # context = model.create_execution_context() - # bindings = OrderedDict() - # output_names = [] - # fp16 = False # default updated below - # dynamic = False - # for i in range(model.num_bindings): - # name = model.get_binding_name(i) - # dtype = trt.nptype(model.get_binding_dtype(i)) - # if model.binding_is_input(i): - # if -1 in tuple(model.get_binding_shape(i)): # dynamic - # dynamic = True - # context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) - # if dtype == np.float16: - # fp16 = True - # else: # output - # output_names.append(name) - # shape = tuple(context.get_binding_shape(i)) - # im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) - # bindings[name] = Binding(name, 
dtype, shape, im, int(im.data_ptr())) - # binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - # batch_size = bindings['images'].shape[0] - if 'cuda' in str(self.device): - print("device: 'cuda' - ", end="") - - if 'cuda' == str(self.device): - # if device is set to 'cuda', all available GPUs will be used - print("%d GPU(s) will be used" % torch.cuda.device_count()) - device_ids = None - else: - # if device is set to 'cuda:IDS', only that/those device(s) will be used - print("GPU(s) '%s' will be used" % str(self.device)) - device_ids = [int(x) for x in str(self.device)[5:].split(',')] - - self.model = torch.nn.DataParallel(self.model, device_ids=device_ids) - elif 'cpu' == str(self.device): - print("device: 'cpu'") - else: - raise ValueError('Wrong device name.') + # checkpoint = torch.load(checkpoint_path, map_location=self.device) + # if 'model' in checkpoint: + # checkpoint = checkpoint['model'] + # # fix issue with official high-resolution weights + # checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' 
else k, v) for k, v in checkpoint.items()]) + # self.model.load_state_dict(checkpoint) + + # if 'cuda' in str(self.device): + # print("device: 'cuda' - ", end="") + + # if 'cuda' == str(self.device): + # # if device is set to 'cuda', all available GPUs will be used + # print("%d GPU(s) will be used" % torch.cuda.device_count()) + # device_ids = None + # else: + # # if device is set to 'cuda:IDS', only that/those device(s) will be used + # print("GPU(s) '%s' will be used" % str(self.device)) + # device_ids = [int(x) for x in str(self.device)[5:].split(',')] + + # self.model = torch.nn.DataParallel(self.model, device_ids=device_ids) + # elif 'cpu' == str(self.device): + # print("device: 'cpu'") + # else: + # raise ValueError('Wrong device name.') if not trt_: + checkpoint = torch.load(checkpoint_path, map_location=self.device) + if 'model' in checkpoint: + checkpoint = checkpoint['model'] + # fix issue with official high-resolution weights + checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' 
else k, v) for k, v in checkpoint.items()]) + self.model.load_state_dict(checkpoint) + if 'cuda' in str(self.device): + print("device: 'cuda' - ", end="") + + if 'cuda' == str(self.device): + # if device is set to 'cuda', all available GPUs will be used + print("%d GPU(s) will be used" % torch.cuda.device_count()) + device_ids = None + else: + # if device is set to 'cuda:IDS', only that/those device(s) will be used + print("GPU(s) '%s' will be used" % str(self.device)) + device_ids = [int(x) for x in str(self.device)[5:].split(',')] + self.model = torch.nn.DataParallel(self.model, device_ids=device_ids) + + elif 'cpu' == str(self.device): + print("device: 'cpu'") + else: + raise ValueError('Wrong device name.') self.model = self.model.to(device) self.model.eval() else: @@ -229,32 +223,8 @@ def __init__(self, # self.model = TrtModel('pose_higher_hrnet_w32_512.engine') if device.type == 'cpu': device = torch.device('cuda:0') - self.model=TRTModule_hrnet(path='pose_higher_hrnet_w32_512.engine',device=self.device) - # Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - # logger = trt.Logger(trt.Logger.INFO) - # with open('pose_higher_hrnet_w32_512.engine', 'rb') as f, trt.Runtime(logger) as runtime: - # self.model = runtime.deserialize_cuda_engine(f.read()) - # self.context = self.model.create_execution_context() - # self.bindings = OrderedDict() - # self.output_names = [] - # fp16 = False # default updated below - # dynamic = False - # for i in range(self.model.num_bindings): - # name = self.model.get_binding_name(i) - # dtype = trt.nptype(self.model.get_binding_dtype(i)) - # if self.model.binding_is_input(i): - # if -1 in tuple(self.model.get_binding_shape(i)): # dynamic - # dynamic = True - # self.context.set_binding_shape(i, tuple(self.model.get_profile_shape(0, i)[2])) - # if dtype == np.float16: - # fp16 = True - # else: # output - # self.output_names.append(name) - # shape = tuple(self.context.get_binding_shape(i)) - # im = 
torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) - # self.bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) - # self.binding_addrs = OrderedDict((n, d.ptr) for n, d in self.bindings.items()) - # self.batch_size = self.bindings['images'].shape[0] + self.model=TRTModule_hrnet(path=checkpoint_path,device=self.device) + self.output_parser = HeatmapParser(num_joints=self.nof_joints, joint_set=self.joint_set, max_num_people=self.max_nof_people, diff --git a/export.py b/export.py index 4e07bc5..c4b66da 100644 --- a/export.py +++ b/export.py @@ -459,7 +459,7 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'pose_higher_hrnet_w32_512', # weights path - imgsz=(512, 512), # image (height, width) + imgsz=(512, 960), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu include=('torchscript', 'onnx'), # include formats @@ -495,7 +495,7 @@ def run( assert not dynamic, '--half not compatible with --dynamic, i.e. 
use either --half or --dynamic but not both' # model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model model = HigherHRNet(32,17) - model.load_state_dict(torch.load('pose_higher_hrnet_w32_512.pth')) + model.load_state_dict(torch.load(weights)) model.cuda() # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand @@ -583,8 +583,8 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'pose_higher_hrnet_w32_512.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'weights/pose_higher_hrnet_w32_512.pth', help='model.pt path(s)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[512, 960], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') diff --git a/misc/utils.py b/misc/utils.py index d07acab..2620b7b 100644 --- a/misc/utils.py +++ b/misc/utils.py @@ -4,7 +4,7 @@ import torch from collections import OrderedDict,namedtuple import tensorrt as trt -starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True) +# starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True) # solution proposed in https://github.com/pytorch/pytorch/issues/229#issuecomment-299424875 def flip_tensor(tensor, dim=0): @@ -372,12 +372,12 @@ def get_multi_stage_outputs(model, image, # but it could also be (no checkpoints with this configuration) # [(batch, nof_joints*2, height//4, width//4), (batch, nof_joints*2, height//2, width//2), (batch, nof_joints, height, width)] if len(image) <= max_batch_size: - print(image.size()) + # print(image.size()) # starter.record() outputs = model(image) - ender.record() + # ender.record() # WAIT FOR GPU SYNC # torch.cuda.synchronize() # curr_time = starter.elapsed_time(ender) diff --git a/scripts/live-demo.py b/scripts/live-demo.py index 7c5c174..fa6a4fe 100644 --- a/scripts/live-demo.py +++ b/scripts/live-demo.py @@ -15,7 +15,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, image_resolution, disable_tracking, - max_nof_people, max_batch_size, disable_vidgear, save_video, video_format, video_framerate, device, extract_pts): + max_nof_people, max_batch_size, disable_vidgear, save_video, video_format, video_framerate, device, extract_pts,trt_): if device is not None: device = torch.device(device) else: @@ -25,7 +25,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, else: device = torch.device('cpu') - # print(device) + has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32' video_writer = None @@ -50,7 +50,8 @@ def main(camera_id, filename, 
hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, return_bounding_boxes=not disable_tracking, max_nof_people=max_nof_people, max_batch_size=max_batch_size, - device=device + device=device, + trt_=trt_ ) if not disable_tracking: @@ -61,23 +62,24 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, frame_count = 0 pts_dict = {} + t1= time.time() while True: t = time.time() if filename is not None or disable_vidgear: ret, frame = video.read() - frame = cv2.resize(frame,(672,376)) + if not ret: break + if rotation_code is not None: frame = cv2.rotate(frame, rotation_code) else: frame = video.read() - frame = cv2.resize(frame,(672,376)) + # frame = cv2.resize(frame,(672,376)) if frame is None: break - pts = model.predict(frame) # print(pts) if not disable_tracking: @@ -132,7 +134,8 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, video_writer = cv2.VideoWriter('output.avi', fourcc, video_framerate, (frame.shape[1], frame.shape[0])) video_writer.write(frame) frame_count += 1 - + t2 =time.time() + print('\rTime elapsed == ',t2-t1) if extract_pts: np.savez_compressed("output_pts", pts_dict) if save_video: @@ -170,6 +173,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, "set to `cuda:IDS` to use one or more specific GPUs " "(e.g. 
`cuda:0` `cuda:1,2`); " "set to `cpu` to run on cpu.", type=str, default=None) + parser.add_argument("--trt_",action='store_true') parser.add_argument("--extract_pts", help="save output keypoints in numpy format", action="store_true") args = parser.parse_args() main(**args.__dict__) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d849d51..76a8f40 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -28,8 +28,8 @@ from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm -from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - cutout, letterbox, mixup, random_perspective) +# from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, +# cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first diff --git a/utils/general.py b/utils/general.py index e2faca9..9b10734 100644 --- a/utils/general.py +++ b/utils/general.py @@ -35,8 +35,8 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize -from utils.metrics import box_iou, fitness +# from utils.downloads import gsutil_getsize +# from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory From c8d49cef1e389763f75aec9af8687f03ed8896c4 Mon Sep 17 00:00:00 2001 From: Giannis Pastaltzidis Date: Sat, 29 Oct 2022 02:02:11 +0300 Subject: [PATCH 3/4] [Fix]: Script improvement --- utils/__pycache__/dataloaders.cpython-38.pyc | Bin 41659 -> 41487 bytes utils/__pycache__/general.cpython-38.pyc | Bin 37920 -> 37812 bytes utils/activations.py | 103 --- utils/augmentations.py | 399 ------------ 
utils/autoanchor.py | 169 ----- utils/autobatch.py | 72 --- utils/aws/__init__.py | 0 utils/aws/mime.sh | 26 - utils/aws/resume.py | 40 -- utils/aws/userdata.sh | 27 - utils/callbacks.py | 76 --- utils/docker/Dockerfile | 65 -- utils/docker/Dockerfile-arm64 | 41 -- utils/docker/Dockerfile-cpu | 40 -- utils/downloads.py | 189 ------ utils/flask_rest_api/README.md | 73 --- utils/flask_rest_api/example_request.py | 19 - utils/flask_rest_api/restapi.py | 48 -- utils/google_app_engine/Dockerfile | 25 - .../additional_requirements.txt | 4 - utils/google_app_engine/app.yaml | 14 - utils/loggers/__init__.py | 404 ------------ utils/loggers/clearml/README.md | 222 ------- utils/loggers/clearml/__init__.py | 0 utils/loggers/clearml/clearml_utils.py | 157 ----- utils/loggers/clearml/hpo.py | 84 --- utils/loggers/comet/README.md | 256 -------- utils/loggers/comet/__init__.py | 508 --------------- utils/loggers/comet/comet_utils.py | 150 ----- utils/loggers/comet/hpo.py | 118 ---- utils/loggers/comet/optimizer_config.json | 209 ------- utils/loggers/wandb/README.md | 162 ----- utils/loggers/wandb/__init__.py | 0 utils/loggers/wandb/log_dataset.py | 27 - utils/loggers/wandb/sweep.py | 41 -- utils/loggers/wandb/sweep.yaml | 143 ----- utils/loggers/wandb/wandb_utils.py | 589 ------------------ utils/loss.py | 234 ------- utils/metrics.py | 368 ----------- utils/plots.py | 575 ----------------- utils/segment/__init__.py | 0 .../__pycache__/__init__.cpython-38.pyc | Bin 173 -> 0 bytes .../__pycache__/general.cpython-38.pyc | Bin 4575 -> 0 bytes utils/segment/augmentations.py | 104 ---- utils/segment/dataloaders.py | 330 ---------- utils/segment/general.py | 134 ---- utils/segment/loss.py | 186 ------ utils/segment/metrics.py | 210 ------- utils/segment/plots.py | 143 ----- utils/triton.py | 85 --- 50 files changed, 6869 deletions(-) delete mode 100644 utils/activations.py delete mode 100644 utils/augmentations.py delete mode 100644 utils/autoanchor.py delete mode 100644 
utils/autobatch.py delete mode 100644 utils/aws/__init__.py delete mode 100644 utils/aws/mime.sh delete mode 100644 utils/aws/resume.py delete mode 100644 utils/aws/userdata.sh delete mode 100644 utils/callbacks.py delete mode 100644 utils/docker/Dockerfile delete mode 100644 utils/docker/Dockerfile-arm64 delete mode 100644 utils/docker/Dockerfile-cpu delete mode 100644 utils/downloads.py delete mode 100644 utils/flask_rest_api/README.md delete mode 100644 utils/flask_rest_api/example_request.py delete mode 100644 utils/flask_rest_api/restapi.py delete mode 100644 utils/google_app_engine/Dockerfile delete mode 100644 utils/google_app_engine/additional_requirements.txt delete mode 100644 utils/google_app_engine/app.yaml delete mode 100644 utils/loggers/__init__.py delete mode 100644 utils/loggers/clearml/README.md delete mode 100644 utils/loggers/clearml/__init__.py delete mode 100644 utils/loggers/clearml/clearml_utils.py delete mode 100644 utils/loggers/clearml/hpo.py delete mode 100644 utils/loggers/comet/README.md delete mode 100644 utils/loggers/comet/__init__.py delete mode 100644 utils/loggers/comet/comet_utils.py delete mode 100644 utils/loggers/comet/hpo.py delete mode 100644 utils/loggers/comet/optimizer_config.json delete mode 100644 utils/loggers/wandb/README.md delete mode 100644 utils/loggers/wandb/__init__.py delete mode 100644 utils/loggers/wandb/log_dataset.py delete mode 100644 utils/loggers/wandb/sweep.py delete mode 100644 utils/loggers/wandb/sweep.yaml delete mode 100644 utils/loggers/wandb/wandb_utils.py delete mode 100644 utils/loss.py delete mode 100644 utils/metrics.py delete mode 100644 utils/plots.py delete mode 100644 utils/segment/__init__.py delete mode 100644 utils/segment/__pycache__/__init__.cpython-38.pyc delete mode 100644 utils/segment/__pycache__/general.cpython-38.pyc delete mode 100644 utils/segment/augmentations.py delete mode 100644 utils/segment/dataloaders.py delete mode 100644 utils/segment/general.py delete mode 100644 
utils/segment/loss.py delete mode 100644 utils/segment/metrics.py delete mode 100644 utils/segment/plots.py delete mode 100644 utils/triton.py diff --git a/utils/__pycache__/dataloaders.cpython-38.pyc b/utils/__pycache__/dataloaders.cpython-38.pyc index 5c1e9c3f8684fa517a1942a89f812f17d98f9dc0..152bf1a2e5b145c1dfc395d12d138e6f28a33501 100644 GIT binary patch delta 12262 zcma)C3wTu3wa%H$Oolul1R~*`pacdI5|W@i6i9|7ghvvRpfVuC$(%FEka@AsOi0q| zNTF(51>;7sK7-n8(fXk2OTD+$R$F_m^?|py9&atydRw&h+V9e@kI(8||2jivi1FT= z@0&mSvG&?)uf6tKYwww7p0^!&&Q`F!pddGk{(8N;{O=7H%vGK~xwJ~j4%d05suU-c zEKSi0LaW1TylbdD*?YM*#e0QT=v}K#^{&&`>&}sStw@`8+;&j$Hl*s)Q+1ENf%}WK zi_kyPcsxs+(P#5E>1BJHM>ZbM+K_e49B&J1ddti#t)$O(t0Ep!vTjxO{g6HnX~~(| ztV3DeP1(rKOHN?A%T5FS99W~eK z<-~BizEoQ=J1d}Qmui>Mlvj?`S8A)MeihYM&(6};Xk~$H?Q-o3YP(untF0quJBZnO zt)6(*(-dB+HE15HdGvXEckbOq+^^AEv?i^Y#_ZN?omuWpL-TVtsB(7psZrJbs-k#h zt@BrP%W0?n?y2gHhV`nRm}zWJtawc7(Y#ptbbdS>Hp&{;^vo`v7HXC3EI0!^N zN;xPs5VV?heIOWc7kG1e^f-!UHWdWy$}1=5%~|1{(zKT28Soz&G) z-`d&U*4gOI?P%+&?`rhbTvqMP<>o6JJE&dy@|Ml*#3U^QrZcR^d!w4Qh}23R$*Wg( z-d0i~?fC~TSjMgSrN!Mes_AzG{Kt}ty3yzJUsm$4G@$^><^7W@l!*M<9nnkA#VeK3y21kK6Vo zR853H6G1&kt5D;rp~p>IM5E7sA<*m$MO94~q925r`<^J=s~ncg=T4XB3fC(K8(nJhmjimzm1yXhc_dvqRS+9Ucv zT--s_ZW|9W6<_8!u(@5{HSL;~VHEG>I)Pc>^Z7%nVfgh>$d@_fE|9Q<2dH#6eGC_Y zOR+oh9jwLT*_q>G>o&#Xx@59T(;$jk{xBSG># z?gDG0oklbv{JQuy*t4mB7p3n2cs6HD6>8WJ6G$bV0oAxQ3{%@F`WVaPyjc-z!AvsY z9*sUaYqFxW$){(RCDXIxsGrCdtzgf`o)un>DoUh{-Sp3Ns6wcN;%SuaJwola1o{@} zCK4BmGAD)fIN42ibYR??J>p9APgMKQv0!nG+afz+qVe0r7`d9{N%biOG_%In@q#%Q{ z@Qi+AuESOhrO?qIrGKU)6y2{2Gp}W9v#+VGV{3g^=cmj}zCHiu+LzOV#H$#YcV)|l z&m2Mo;{9hL)L$n^nI$tGw?CFmz5~iLfNBEM84ObtHM~w{Zg_2x zAWhbvp;uPvuxwv&qQQ6y zgwO>q4cw*YzTMZ}L5W0DNB!1DpQmkmYnP>jbJXvhIo99S(bCvTiA76Wt8Z&# zS3PGD=VJj&&cbEHzISEAqDOo!n2QB*ifEi7x_4SCL0MJp^K5DJY`&_6vXfaEjgXt8m|Vqd9hXoukBWNk0%T@_sUlmFN|JrIx%9>|8e-S)8ZolN7S_Pxx_p0 zQ|kSIK1M!)%a&W_vbz?$T&`U4Qw~W3?mgqY!n^L*ZOY^FL*ZT)&-%P_P#L!Evv*os zOKX^c>AnF-PscNzmQyR&Tbb4$(S1tps_axQmQPl`q1-9Iylg?qW7IrkFID5EL)pt} 
z0z;0{ruI&^sJpFXx;(sll6-rad({sx6eooA3B#md)vLyIo{>B{ANjPcrq_8|daABf zcFAX}N(=e0q>0{xEv1QZ~RwwNB#H4fNpq2BQpdR5DYu8rQ}F}t+vc4K2p^TsZ3e!986Q_y+L z5`P6_M}U&JxM>fo@m@1Y(*%MPWp(l2pl||K1Bw7w6OdZss)(n;7?u5frZW)PUq+%vW;I6EL`zMqN(Z=h6Y+|?#<6>cXBRLA3jeJ5AEP1dzj%?nz8BJ<5Q;WX)4eb2%H8qrhBfA6egC7dm!rlwX-=>*ON zT}$n2UU1I9&XQfNrGG=VB}>FhROk{f!XN&ne%maFMiz<$TBjRmTFIwncY{ayIq05l zXlUf+vF*e0B4M^VmenknwwTZ5n?gevJM!=q^+=JD8jUBL8Fh<(Fp2hd8qlaVs^V4Sm=O0 zKtR6IJlndwR8ZYJxjxjLfH<548s=p6g1Y!O4EhDYK|l$e;Y0fTCR$bk@-g2+0Kx}C zg$TyXT)y>%f)PpuSD`a)5WmjKs?HL>#MsT~;n3HFv967cTiSiwJGO{AbZiB50CvkG z8|Nq2gC?KfGJL!@hG;?WLa>6>o4F139jS{!I(qT(GqZ25O zz*bRBB~yu;if-mwih;)vl^}Q!yn((kV7VGBG}sysPZV*&v5Ut2fWgR@TW)t@AFyQd5SA(t1EW zpaI|ktd=*oRu%Hu#VleY;Bxt5>z*Vyn|33vhebAKod&@Eiv8#*qUae64Ep#kWf_8i zOSY_ujfM_}wZEbp9dDt^GJOu!B2=#iuw&+o;2LzSN828Hm9}mpB!fzG!2(SuIYqkH z2-qn-yYaeleyqJ<mKXtLLK=e|l|ecl$a@GcHE!I{9XoCy6jZx4cj=780T!%Y7D5 zPT*c3LLg*sgSmwd#Cdo!^o65_8uW|XshKWxYD5cCltH>B`T=b5F_iiMd?=m+V?Gq@ zS(l+BiTEYvfo~DGM8iHx*o~O(k0Y(Zp%(K2oS5#QA>KSS!4B8gYxG;!%=^J<1h5N? 
z9P|biHO$dH!k z788V!*JDRs{Sx+VH8_jV(3pfdu8~mZc?Vi&k29 zJ&r)>lfyx8?DSYp(F8jkbKML0K7dc}gH$^2=l+Sx-W%5cBr!RG;ZO~tA+6MfV zXuOGh9!6xto|+XrGf$e19gguj-3$R(u^dH?kDl0b(!QYqt>1+RHg+r)P(4fVO@$A~ z_oyzO0&v>hj^1s6dO!yyfF<(3_B-T%+&X6=C4Ly)N;|}d02@5s)@tQP`t@uE;u&zw zFanB!sy4tl_-QkEJaDl5#~AS~0+N`Mwh_$m9kiw`rdD3n<4JOzfoD)m#h*cQn#Qf8 zus4?Bihl-^&oU`-3UiiNMgw~h&w`xY@OWE6?++F7Y?g2Q5eWYu=E0`OHiFd1G8C8x z8_JlC$Y^g#64QEZy)mzCf6PjugB_eAL1I}yP*YHXk|RreH2 z5ROY6Y@C#AQi3S%0|%dUh3BuCMchm~yQ7ye`CI5>(dSW>g3o6PTq?W6?n_t=kljm9 z{maEf3pga72=Cd$m*f>h*worYf0=hg$T~wD$IK4^9t7+mQtR4Z(1J%uUu$E1M`LG~ zcnp)}S#6%}4J{t=I9g;hvd}w>k+>`f6~pMsr?M-IiI@?gnYdC zxLypg%$!g+U!|>k3!CUopyMNsvu!G=_YiBt-Q!(GLT&R8=hMgmxAkRf>uhf z*nEG9LBB;apI-K^d(ieQ+Mp{@DX$qUN%~P_u2k5u+fRwEJq7C`ewBcVZY2HaVP@T6uoJs(*L!r>O|ODD*ZMZ z9-i*bP68i~JbvwEWLJ=lxZj4fQ`w0{F1_P^i#)$#-Gm4jB-yYQYo1x|CYS`D&A)+vg1}^ z?VEkfoawn1+JI?(ChylT;LTjN$(yhLyyB9R4s76eAxk8dy$!6=eg+}<%moZi$?Fc3 zE8FDP4pb(2cb(Q9RBVNOI0bYgqHI)+r|pt%kGkGxy{YGcY$xCdrb_#&UvUT8a4z`; zVf;FEbns$lgE+}5=i^?1p80@>08P~GE)sWwijUxXXbtfol-@?^dXx?Tjsp0UGBdU- z97{8Y{TC&i2SG7{QEYzJm2?W~tT6V;hvl!o;I|#dgXE0Sod=I(D;4tn8!D5(C$cUw zrgy;6MZelH;=X7f&ultylQyW6<*<__x~G^Sd9!n6;n;SlAVYC!kl z8p*2^)6tdFo9M<{6uxBSH>uxY6kMm!W@1`=YZ0e69F2a*a{h)>A)malYCbPEmnIey z-Lr>BKfaN^=EoD_T)E^+Wf$|oP{b|Z-$_7MvTV4>43m+OFa2vZ&;2F@V6OqY;1fbi zEx=@F(7=Q8QLP#6{qi)$$?l*|9{7q!gZm(DMKoi7ke+ea3*}>0X5~U}E`Ix>`st@x zA>xJjZ;E{EmKDiU5RWsLN2%96 zUAQ163-K#Vl8b)cnj}i!1e^psfnJUxY~C=$j7OFdRQWo}hK58zoCRRD=l6wnWG-DY z!s-BRWtd*vd=#`T^E?UKYvpx^XJ-BgYxIG`J8YLAsFEL|SV=#d(k65VwQ~OO4)>#& zE89xVIodd5QMxIm-qzFVIW3P5w-s}}74ve*>Xwe%9&3fk(tMM`!AMAt^u&89(?py# zUERTm_1i6uldKI`7^#=<12=%%Jw%ylHMhyFs~gd=0YD#+qrsd+S5xc9N}P+3=xFxl zCL(+f3(=Q?rSe9(WgTRr2O8)yy;Y_&9u3ivJHO&hV5#(z!!E&=TI<1x)3WGjol+&6 zkIr*mOhlCJ^c|g(vl!2JN5=TVb@I?;VK1Asa zkidLQ`(7g&p>s(uEp!bhG~6!h0Rr}8=By^~+D*@-);9}Z#yF%2-dsM=oXVvi#Ytnl zD1N}Gk?U{&`;?h9=TJ_m^<;1HMQ#*9wO zcfZ=)a_f0fywg%S>!6REOT<7dGt5FtS^T{jE#4VpWEXx*Ah+GM!p%CucM{f<-50_9 z2Ebi_^dg2wpSa7Wa8gW)+JR00<&iZQTn#8eEBlP)s8*uN2FMNud6f0kRYkpCm%l&m 
ziH4%WTi|Ki($?W?Xzr-#Xl@Y8sQ3J@NjQYD-@gf?pX55iAEnhQe$<+wa8}VHa@##y zcOJ*g4*0VfPMnX0!mTFjMn!MoHXz~`19zv zmU{u$@%4cHIY;*E(f9?xLBJxw0l;EFHvxUk&cTHRW?9lw-w2|c04yJ$m9LJYuZONDJYfG*mQQi5tmhyL&P1cJ~0})dTa@rRx%z{Kb7&4ai(9bf28j^R9RLXxJnI{(W z0h>Pn&H)YsJ|Qq&NQUS~y$0Rh@msW}Xb8XNWlO;xWNgjYi8Wo9hof2|q_2glj63DL N2Wk|%?0lg2e*j#oLbm__ delta 12359 zcma)C34D~*wa+)1Ool8ZBt#Mdk^l(|2C_&9TYw}aVM~HZ02zg0GT)bEU^ebIlda-t z-FNib;zAX8&xQKb(rBMP?P6^jDwP>U;P0?3voT$1bSU;#!KX>DhtB!CH51aEW_K zu+Ci> zCI{9Ao88US&T+Tsx$ahdihG@&=U#7YFdTiPQA!?vq5_olIa0r%sYyUl24g!A-i zKseNKI7KhqV{>mdN_Ss$sPk~j+LRqL++FAyT}3H+(H`4=O&r%!_G^3Jr_XVHx?X&B zio08%q0glMTlDkwS@eIaUZR)M|84quyLg?`Z`+EuiLs(TpQ(YY|qb{WKC`e^u&TjDC&*+ z!y(hk^v3$o_4JzqmVD{%qV{}Wz-yZR-eFHvctd7ySOiTg!xxSWdm>&lYFG|mEED~}fF;h-mC2s2{%qW%G6^wHFpHO-P&q`kFj1sT=HZI%7u zpi$WeTHZkABA?gSZ&XIZ!q;EfAM2@%1!&vAaMbTJJ;UKZc%ar3#I{1-?l_nU5`w`> zxio#2wn1K;euK77o=D$u*4zf?Ty5KE*m+RX+GJM7^b0o9!c*y3y@^1(WjBWWy{>F4 zz0ZiEX{Ay_!0s)1QO1l~E2m}C`pus1j?JFNj_&S`HfvIMXG43}=8mprE2FccyP><; zQ&U%EW$^IU=1v-yM=}=8+<{HHK*$j^qWxiA@jJ%!?=u>-fV}ehsWNi8UAAUkxhCmI zSd?iRhVB^*i#>+eMFXaTK#DC(%hH5Nt(N~nAAfujv1-(r^%E_#4FeiLr96>cp~Yp+ z1t0EiK=(pa(La~K%JfSrtE4>7|h zGsR^*wm0V0#XjJ~(NEVayiu=dL@irLr_Z3c3d4>-*sB{tTnEG?dC8}8cH8bxky8%j z%eve(+8uHz_mA2k>7TMfS;I;!I3gdP;-0^lnyyrFGe!;pfbvmIJdAcS&FZ}9Qbx90 zX~c4$!D}pg$QX)>hX~tc<3*O{NnQss56ZXlc5FC;=40F^U}bwe>`^`=5bz`~c?2lT z;T_bvmp-PGfK#)lWu-Y&#U1Kk2lA`52jmO+F?9ru86V?;+vB-7B8{n+&f_E4EW4jv zN1;86{gO_K?t}8ZsTZvKDw?AJPXIg#z^8Z+t;7;;F~<+k1dhnoNgO+f-v_|&v{6y; zqUK<8bdTBjB)Mx^aXAgvfNukQhkz?zoR9W11gtbO67WYYhv^fB5mHtFPAn&xvB&g}7-B?zrLZRc zEi7SbX9!v8!Ene=5z3XV6z?#@G2t`B_khc`{{mXy2jJZt5wGwDO-1Kh7(PQx{3p8O z&H!0|lRoAm=`9MW12f8mDjR*jXtJhlmw%sL8c*yF-#?lvE(UqD1*@R)bc>`BrnZ&l z6~a3#UPRm8C%iq1q3;530wJnHO~dTC)AoUa{FmbV1wX`WBLSCP$tTqxtP)!=Z}SI} z&nn+8Zr=Jd(Eo!#ce~3W#+e=@A=v6(Ln|RCZn46jq6rv+GCfm2EPz-pU{#j{(@Sq{A%=K-po|cZzwubJmf3PpPVAjp+ewkP# z-oeVut?SpGa|t1clLS7c4|C{9v89}s(js=tPiGBljdG}@B(D}&V^P%9{X#U$FPF^A z5|$L=>Lkqm?1KW@p@q^`~aAL1XwPEr89C9a%2%6k$0Eg 
zRxq(~)zAL9R%)dU_;n-90vvI0yX0f7a>Q0Lf5hf05y$1?xur8+!Uk+>FQdiMEf#-6 z`%iN3-26Cm@MrY-D37DXto=7yKP6x}1*KiTF<@Bf!Uz}?`^ASCw+n+Z15C?u^pZ0f zrYPkOSrlgeN}YdhoFWXH6h9;2N>^bXA{WO9V6&)az#9-h#}I5)yb3T*P_N16vS$~3 zOf#-bW=1iQwH4B7@@t}$BK+)Ri36z9Zegztw)7%>AV>;yh#+tb1mW%2Qz;+ph-cq0@ea_L|yyCu%SEw-%J3FUqIm{|mzwv1Az-8tXsbY=+5_~>)y zPnI08%jf3r%07#a(M7JG+O%&{0w|Q1i7B$8BD)mi={P0y?;l(gV<)A%VvVYR;C5u4 z4^(t%74m;7Ue}JwR~F7L{00q=*h{?8l9AL!HN7KgB`uq~Tw)dHnd7yS{Q0@-_KT<-DfZ;Fc|U4q7m9;e_Eh{bonR2pKx3OW~mjYid)&X3yqMDo9&88`_#Z z+t#n`UT4WERp*^&WhLe|cZvO=Gf9*6Rny~ksLO$YwE)Ecegbs3s8>YCk^;4Tdn`vR zpu8$M!bgJM0XmSF^L zsNAg>T@{x`?bY*8u~*c%tD=NJfewx&76Kq%sPJJ!cP;>*2fT@}{8~-f@`uq}iW&Aw zBvnyIT}$h=(#@F9XPD-Mp1xKVT&T}N2os+GOyo8X%DXQ78*!(qcT_bQ^4Y~xxUz9+ zP@2aZpgYfmMp`dFSiH9kRzs%DL7Jh7fXT=P8ew{-;QoB7~MBGzig+Y#& z`MC((M@vjQEkN#Bq~fw&1?ue}(+0qW!YXRHayevMsop?9ltXl-G>Th;Kf2NI4_m2W zp<7dQ!z61NZU_Xn`RS53XbKIu&;s2i=ys?M6$b5E%%DKv=qq3^ly+G+9B*I66hxuK z%qM?M{%(m^iPEO(Gd8n?d>EfmACqQX`PMvaa#rjhf-dKTF^p~hL2Srn1sCT`F9DeH z6ixo2?mQd)OJBNE`$+aKb+2G)y^XcBAw`iz(V-|sh-Qq;1`iw`Y?jUR(x*qAODR)+ zvUKU}T0*o^O(Wvc+>=mkM!eB}Q=zTL`jv9ave{%-xd*1oo0lCdyMY$;UC4hYSMv!^ z<2oWR3Dwlg8ymygartp$ zi&iNcnojg$j#x;Igd-%iWz@5@J;d_?&`B(W;s1rGp0Qgpn621l3xJov$W*eul7P3z z2$&^CFcKXu@rOzj!|=(O@^{U}3&*yjvfk6{4;bcAwWo>+31b@5&--hePp`;i%4(4? 
zu<3Yg;;>Lwx6G6|&2!n2kCNnpcrNUs)7a(U>L^7B)8NMzbK)g(VN2h1NJy*(Sr$%` zgydr_MUI`A*(HC}QdhtcI0p0qfXe^|Wl?Lfx)m%ze+mI>viGbvAMtCKs4 zbZ!!DSkeoy2LOS>vg2aUchsPj%?}113ZEvuOo7obg7GW3ogO$EffplY{aAtEN*U#xfJdS~M z3Uu^5Vxn49rNBH;`U}r_MV}d_1bP$o#D4k8x}EVEn6UeU-iT$Vj2fkk6Xm<5tja4vDWOkJz^v3EObi3?b?VSa~kls;(^fBn;8%u_|I(%H7SiiUk>5{HPa4h0jS*WG$7F%TH23N&4%-SP8 zbZB#8S8;Vl3i{*W7=g-Aag|b$(wgb2UE&;d{pi92pR+-CQ zj$PQxY1dipyW*e>AsRuEid`XE*1Fh_nLM%*yik&?`wS1WE-nKJn4dB-8&>d*>T?)F zQOe>MccNR2F6tl^$FE0g0244fF-(u$r!Ca<(q6Dw%Rw=eE+PVUO|M{lZgw)(1qgxCn8iR<5NaSaeTAo0!-AZNnOJ;AW) z_4}9?@+f*K_ESniaVBm9V0*s-t*ZgpC8mKoy95W?R!qd#VQ?Nm5diCWK3ZJ2;+P^2 zT`oob9o?q@f;48O(L+y|p0j%eJyVBbK`L7kbqYjD)esa*&zm9L7)m-wH%NkuW&!K4 zZ2eW1ZLmsJv@9KLk0(QVSXa-U2Wh~ap@Nr(Ud)q~Ir`%*U(r0Dfhgq3H5p}yY?6r~ zB4K)Wr65R;Ha8LtWz{#K#ZJu!8$`DhOC4za6)hG33vNHUG5OZE@^-$N-VB^u2+$>u z4VP==SAm5fV%d5_;v2yECVHBnq`-_Mvkc_hc+^VMqr-HMQ-wQjdp|J>WYNy78c?BCWmv$al3l6uagccofEAnA?4;bb zb5mRt&;lx95P8M9Tbl2iDB&cW@)C~1(eF1PcRBWY5&(x#_6;{W9niimul1DVvJU3bNX3{-sl+Rk8~2yWjYj5lwr{9xycyNYh;aogR=f;SNou7m=GBK74*#QKXZK!J z{w7wuNMNi|RhQHsVKiX__438ura1Q*@D7@#`I2x@&90`zb*7pXuYkz8Y(@MGdlo8d zfFX!ifzDc(zzD=^8jUJ%e;tVb552REvGQL5eYOA&p9s}wluyLm=_+6?b5A;5tuA9u zDB?xjw!sLO(s7{-gBH7~;^|wK6g_N1)s}6TK0`J1zeaJ#c4L9rHvkeAIhz^jlU)jgPT%5-}?6M=1xz0b3*U_>oI2cFEGM2IaO0yd>ZnMg3KbZZ zGh_^MIc(W6nY7tT`C_<2`(*T^aEGn%EyATluoKK8_^3w{aj~TD>rOvE@@06UkaJQ# zBNpc&aJe(azB~8RcW%d|X3mOpc!M#G+PkadV)HvCYAMB9>@jicE#1fN_4qIAWx60E1Za8Fh;;i?{y05U!9tIu<0eHWqjXj$v3y_`B4YqWfye~RWi;jLT`jV{zN@*vh z(t}p=i^N3gsF&XvtS*5@$6OaJBp8>C9<>aAUw_mTJ~?$LzUuG1w^ww4n=S(FRB^FA zzlTMiVVK>SWAa{%abSk5M4fzTs4&jA4!+v5E+Bj>jU{Ad(ia+J@dJj3zB~LOP0)7; zY_?81gnP0#6yk?|-4hN4hSj$5rL}X$x0PrxjO2n_< zW9*D;^25UTV{*#ov(5!_oTbhl-vnAq0G)8U;Z8Qgc78uLwA=&kLpY{-*E=OPdyiC=WNYAwk*M3A; z-DFOmf}oRrUPJF7;XQmEw$c!uO`5z1^k)&1t{ltZiG}FJM)@8@EN02$*A}eCp~OrA zRyzGQ2(MIhF$wZ(l?}&3uoU~S3%<^XnwDLs-;bQW<1CSLudAHJM=JyNL>KBKqvAUH zHXV0^RdZk%WZ4`K14NZBWT}XUDlB2Ilk&v%KdjF6(pFDZVvjR{p#5K=*Fa+n9ab?O}sw(jZ{Frw(WsfI9a%e 
zD8)j)$1zdfG-1`Onv&6jHw|bx`Gk9_h6q9*Q*E;JmfHAn@XIClw~3}}s+a>VIqrRg zO)@agTG)@)cL2T(@IB0O%3wog?G$6e4^Rr=01BHF$^>Btxy4^MxS(O42B}@AGl0IQ9sF!m8 zcGt7mKb7B}x!9!l@G#w+#@^tWuJU|YaC=7q_xr$0(vEWY_D9=cv9#Z$pg$BaLVeMG zsy&ftEoYBEq<%xiiIOD*izAUQqj_;Zs6C2S@@COH+Lm+0!B1s`0(=Zuljydpel*0@ z2=R_#cSbD4wRwQP7hEYnxnt96kfX;L2soiX%MlF+$mLES8JA*98z9483~eN{aIIWS zKc0I=p6^&rNL26qQqG98uXsRmF7R(a3&#|EbI3(&9k4lB@mW`6tRBc5lrF<8F8yLQ z#xem;0R=W?*>{`a5IHJ6zR=B_ph-^&Y|AhzD}6Zy*B*KxRo_0`i*+az+!^fQ>TQ#K znh$SNhvM}se*FuxdlEm&`Pui@$+06xH`@MKW} zz}q*Y)dG;9tjJ@h6E|$wWup7ggF9I9@ zcmyE9_>s}pdu$q)!X%vCm;|8eu^o#y0hD5tgGU>>b?CCuail@@BnAOC(kvDG1JNe> zZK$xan>sdibb1y8^s2iJ^f=3&RT3JTx@>76w6;Fx>QVigORf@1K934dFtM_ zt>;u2Uu?bx#DsGApd33|X=fjql=@YYW_M0@3XGk48!Ou7%?ICzv#3}~EH4%niXBy_ zR-$z|z!e1O6j!3n*ZbrrzX0M`OsM}YFZ3vFzqzKG!RAc(PB0B!}~dgC^}3+zWf z2jF%932-Ms6~G+;)c_^|`tE%R+RQNXlHmF-Oxz8?{N0PzL4Yp-FsI-zNm3*vY24Ql z=pt5KS-6z)7nv>UC1?{N(+CRK7CLz+&V$QqchY>f%dgz09h5oyu0g8-$Td(dA2fn7 zzE0K4lKV#-Y(znM=zdrJZwblDWP2X)^OsDe^3D4j3*7m!s6SvX8v7|UKfRiA;h}{& zR<@eyqu*lCuac$v(D`vuf*nc$W&_Ltmpn_gt)&Mj*Tc0Nwa3dv`-?m$(81x-hx{U>g9}n9!ZLAAt4w09sE0 zybdr0%lN*+k?<3A(;>kefQQhXZj%cSPhZGJ1$8B9@r+Mf&XvKi9t#+&2qDG1M@A3V Nq}nQM@}a~1{|lH4s`&r_ diff --git a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc index 47f697eb15483c9ae275a62e480bd2bf9268a66f..3e5b2903d9aa6fad8911329c5e02e59a95b31f35 100644 GIT binary patch delta 11790 zcmaJn33yc1)pKXh!Xg0)A%TPhh9Qsz0tpZxfe;qigaBp)#+SJ-lb6iKeKXk*M@Zbj z1-u|usU=z!v~^4W)>bQO7sa*q|Euk5{jJ)~)>`}7{%!eN{m(gXk{QtcggJBHx#ynk zo_p@O_syx()^9v$9kYGRm~0FDOZavLzIk*^5!>;2`E-_4C#&TH?9|*Li*KGfud{;Z zPOtRzGdpn&gFbE&*xKk!7VmsQH$w$DlhC@ zp)p=`iVVlJN>&IcWD?#$V>u zd_K@^;8wN1-O3m68hF~MZXB?Hc>AeMeBluDZBB_^1fs3tPIYtV7WiD=Zf&yc&i8Fq zx2c=et(`5rme&nh2Ef20*1gy8)bhD^IZX$ z6}$m34Jk}3U>f;Kz^qJRe0&ve0>!W7uc}kkT~jQ4HK^ar*9y#p52r&bOy{s!N}{ex@({(M3HW3KQvpy=TL$yMGCEsba^d2p(QujhHZwB61- zxHg!?0iFE6coz==Sr-rU2#5*spYXk004B`e1#ZTtvy_3@+pcEI%WU+`o6IAHd1sTQBQn#&;uEbrha0Jopt$-e|t*YLae 
z-SC~{C;6A*`&#}s{|Y|^6bJZ!^C5mOv<~uj_G;sK{(7J)Y&mZBBLhDBU7=Ik7ZsJeyVfa48f5pGWpM;*n{MYjqLHjCAwPV-Cn9D~mh^|ArW5rAb^tf?Ky)yuX$A zIu6Ti&Tp_u((QVtW)@_b^D8PlqG7c%7*q7P5~@@<1cG0WM}-npD|Ia#3#s$gYQYXw ztlhdvjZ3@R%Vx_8cW6>E3>RexkdX;I@=^CxHc!6nUNpBsN{`BdQC&(f7*aO?oo)qSv5NV_pJtV@LL~EA4QoY}VGn5$ zHG_jkF3$<(r1kK~$8u(|dihq)u{9{xa4La-s_TY*r+@Qh!U-r*NBBepSapdd$lGSv zxF!sHT-D=ZDbi$ed?L4!t&;EMeyd^(c8vwF92FF7iq)X@(FOV7Ti;O1O7u8Z3e`)P1VifKI zW4#X1h)*sx((Uh7gkgh*VUH>C4#O5`4_@GbN5)sMh#WVuNS+_Rk6kIdFPS@jFG|R; zKt6Fv>#QpQ2{UDnYhl%JK)8wc872}JlU7gITSTOG(Sv=x01PXS3uNlGi-bHoq3{By z{boX-E;T~O5duf#=zWnuhlobBe$`~uhkY4J;bV_HkXOcH@`1cLE4mTUk04212-4)# z!XOq=07u}Xy8*bZ*>*Sli72>0VMTK9MD0pa1{NXo8!lZ9L?c``#-ZKOMBJYW1~CV_ z-FhMx6A&I~86dxx zj&`BI1ZOaj&rP~X+rg@`Y9OXZZ-@{AGUPE_^|1V<61l_fosa(K$THD6?8 zIvCSK()Jw(Uf+R_?gZerLKND>ap@^|sGtTA25Ub9YhR1NLW=}aG2oGZC}^03Yg>E; z0WqVNM>b5&XZH@bPHki}Mx~vx36ocs!MeqLqeDFM>B2?`_D>796cF+Kqh0h!b}f<} zMSbjqe6MK7Dx3#jmfjKV@waOsb)=by1jHU3x)gxH{D!?9P(E8bgx$T^so|1C?UVDs$A&uq3O|Cl;&_#5yb$!R`CqBUqCQwoAAgL z#q*pm5>hU1Et0ntKRFwxjcCqT$#hZ=($oVZc#*O#TC$}KeIp(Oa0WIY-bUdZN^n)Y zgdIe9TE1V>(EJ!Y!$xA|ig*pXUPnNhygEW@jME}htA+8J+bw(rY(dTy0kpp}2S$o{g+63NCiwVK~%qEZ!%+Xi`o~PwBEca{wj@C7uwf zksTWus|(WADxLzK;wK0)J$ACRl)WXNE$zw2J;v)W?7>j9&9H@)UUP2=$<1Y+3$pWp zvJ&==%$-#vpDpvi{_wl9`bi<=yYFJ=i)T({za~mCvtil0*zH8{9)kA)81_CDb|XhP z8o`~2D}6Av#Ore#&Yxx$vfl%*Z!W+Y1>d`%h``VzZjhDZejRs)FC*^^3-NK0{OhdR zRWTgrGBx-EqDhSpp=CIM7uEzeJrNeaKsr+m!y3LIg~_OAmh)ri4_IVmcD@|;Y-4%S zHM?~35*)GM1qLa?F4S1mRDvRQNUoS$%W`GU z+-i3okO2Ar$j9d1!)D6O^VTPyM7?NQYO(b<1Xz!VasaTc$3j|MxDbJ=ip7N0B3$jo zJzr=s+7`t(vGZF1yg34nXW-K|0eK2$H6cO}PkN9N0PIZ2P&Zs*B_8Ndb(vo=WrEKh zON2tE&YvM8k6cx;nvIh;Rcy)IiW2P|ir}ytK99XY`C-NMwEeU&ja=Xs=<>(<;vLb* zO3+|rtxc3Ql?KG=xT=Cl=WzI_<3^VL@~Ufhk=;4ijg}a$9z{g7NYLvxT!DlTY9uc5 zfT=)je9o8_i9m9?1Tou?-U(YiuH1BzBPlci&&z`9BKG`nU3ICIF}Z6&paMH#9*9|% z9#aEayQT_(>Olmgvq@?K|F~f3TnruBmd~>p{ni=9A{P+-dBv@pH*D@+SS-}N2~DVB zRCK0nsoCNDD-KvHAFJtJM{_(8XC+(HM|TjK^b<%`i-0^(hpov#>6-v4L|6S8e05PQ 
zqYE38qf>3h$!KgC&QLUOiCu(opP>fuN_-!0952e9*10tHtBkk>mLkkD>ev?BblDM&|phQFSTVd+b zRab6b08N>bT_C?v_vz%7z!Rbsr-;A@HfqUpi?yP9Y?8C9pau1J_k2h|Vcbi_S-E+M zr-b~MR)dK-D24NOs@W2GZppJgvZEMTR-t~5cmhs%w4T9piFR0Ah=3K z$T9UxnyW;0pAg_J|!GF3EW54 z;i}wB#EKLe8f@y)=aI1-H)w?`8Zz67FK7OkkA)PUL)qjF9Jl%`KgZAIZEnsr<+ z=}aSpylcgDPQg2x%gM8l$onwGM0p~lLWHTkaW%pzvx_H?M+txn+>ix=AznZ$-C#V^ z*GRrn7BwtpE9K6H2Xa=Uip>btNOxoDdeVt>NUsZlgEk?}W(373%?`B?Y;$4`1U6DyP-ubs1D0wDS?X%w0gs@NXU0#UA3 z#N+K^I}G%ei}Z>@L*Q@&hrJ#x-XQ|eZ{#*Lwlr>A-LlQ!v~H{T581x1EcrK-fXh!T zLlQfddq$QD2R2nGP`{O6KYG*1fkH!R)1#pTpeYi`Ph{XpqarahzlivX;m~Jnkr^G3)$2mFZIe%LD9rOYdi#1hB8E)~ z$HXf*O2|KKn3dCuT+E1&g&Tcrt?b#@LG1NN$L2bAST5hZr)4^@hszl!WZJ0EVJX(3ydVzE(v^1A zABrk`ByAE~@_^|EB#i)RDQK$O;mKkh%S1$eym>cUCO2(aUrzqEnZfBq@?r${Be;mq zY5Bb^D;HfXohM)p9s_dFjIs~S+{JpxrI#17+vOFP?^)l32ExbUb(^wKs^Nm-GNgdV zFT?H)2sR?1@WdPqx#HrycKg<;&SOYaGhDJYVV!c}Ql)(6v+CPKk$kje0oyL$Zz*?T zt`kg7+&;tOgC=A^xL!JMs|fb@XK#-{HiQZ+SS+wlP!2mTcW*CD-T}}3ZXhl0ZSOCJ z%QD@SH7XXk5*P(r44_H+I*Sa;Nx+t=-NNdLxg0pI|=CWKS=EH1K#7SwDv05d>t)*Rkc1%X|gw zae0OB?pzdV*xN!0RTRq4e8(nD1q#Cv6sj6AoPBBtN;mO@JbmS@mlk4A7UXbM(6x9G z45cfCJn7$6T$TG2ZMdgR!v=CD`g;;YV8 zAVcvGVru|=fXhha0r&ue?gJM-j>8tEg#uZnw6eMKkn$#bMk;O9lhVeeTORpf+Y9W{ z;b_3l)(~qFg8a`o(~Co(KWWLrTMzUDtcqx??@Ve{u)YgC_Z?owiiY3ec2+PMgD)S! zR0NrV|D~4YOhY8@t^tR9J(@q<)b3)nqhmk=Ty$a?f)ov606w=Xikx9?r(q8nn1bs8&!KQUXDbe{Cm;Mc$lpebvidkj;f zm(lCCFuZ1Y8}+S~zv-M-L@q(&;b<1oHCS{R(qu{33|1+dx;lNS1m`76B^JYD#vD|R zcokub%tj$_yXMx_ARf_m!(sY~>Z?&<7YK2OJZyoi3Vj>yZhjxyl6pyC1_mev1!N4? 
zG2Q%p3AmrJnfnehGpxNalUJ&$PlW4sQ{dq(6y*OH3@{aY4y|`XM$u!qt(TA|;qnkS z#}i2P3Ienba#}UoGZqmj(Uee&IFHPZ%w)~LKI6D3Mj>Is8@ZTYlmQ!9bMukgD;N&bf^I-wlE(sYxFKP~^+HO8&0DIBB>b zu>D#z0e7cifeG2L!rNymyr|lq>C1)-A3P$#btmMEXr$c_hb!11vS5_i6fb}npIZq= zqIz5l_;SK!(KmE<8by3n}(v_46D6O7j5!}eYHtEw15tp;e>Z| zu)V>W(9H6}8fv3Ue!~W;V*R0aQ9K*06Ekk4okHAT0g3^*MhQewJsYSeUPSvZoKPbV zJmg1xMqd4aVN2w)s}D0xp1b;^spx$;8RSA~28RG}EE;Z8!;081f4tv2E(C7&xe_rB zXGL9%mD$%UPZojpg26yI!dJH&){QIStVR`-9nTS_$#kTIo6%6ac^Vp%x@*(@L7{NN zt}ETC3O9t#aKZa{y1E*zf*}##gM!jo=m8{Tu}j3C5T9YF{OL7|;B+@G`I40m?`N2B zp)YW6&ZNV~Me_WC);XxX`R3X&bAFXRS443!Oh@dsXqXGI6$^uN#WJXS=a?^QdXZ%J z7qIjL?oC)UQ1QNgu$sLl*??~xuIPSiX|Xs1jZ`hPe_&oUarr{UfX=|Bhr7BRerX|o z1e&Bqel$>CO$PtFkw4EI*>BCxZWg!6^@Hm4e$@Kk3ze@8ZchHcoPRmWnOQ&UcNPcr zC6$%lg4A_FmkO6zPI#-Dfbb{*&3$8odLpic{38=ehu-&~M|_B&0zn4=xRy4r=q;7^rf@1E2*K)ewQF%W8|&ubAc)g)6o6ra5H(!!D7~~wpO?Mk#XkAbb!E&W^R6#l zM4JGeA}Dq!is(d%8-p(=s6q)tfqr4R7rH*l4h?^JeU5c8?g?g|!)c_GX7JTl@ZVon zd@v1BnExnOd#xY*65BVUpU29onUqFLXYP*uoc zR0?5&6OG906>L$7xlkUzX+g3Ik?E>sCq81X6$haO3lR5Kx-5WX5Dp@!GIzojBszem zXJK;l2SR!^cIy=CK4%~ihb$!UA|XwYIzv!o;6820HHTKSJLN5h9yQlL?gH94=xPyo zK!knUi3-qc)0uV{AD2UynYn4+Lx|G=z#)~|DzxE3RE-hVB&X@?M)OE|T7|Jt!NXew z`PAXr>@L{MCd$7bE?Z4ijzcU&_1#*S!!ID9A|m&@+eI{t_ojv;0Yyn2Y||(e@7|T* zg_Ez`Jel1?Ja4&qb1)aETACpPjc`@r>;PQJM6?O^mMAi&*9ov8lQHPQi*u%%5uDDX56K4Xr02EXi7_7g~w7*kdTx|v31 z5@yg@)5Ae}4Y6}D#jZuaiF({TCyJR0!( zY15Cxg=I)0hq90&37y9mkwz%JZ@DbM^F$`+)eDu%A13S|5?y#V9L zaKNQ&tS?n0#}vp7I{fH_9m(5?FSX3Iy%n{igts1XScZull;QT%8)d(rF31gg^STYI zMLW_wfi#pT)?tf&8fKRH@~$I=ljvfo6A6p4dj+;;$d`{?1~-|y8sC%%w?KuXaGib_ zN2!_aU&TBmuR<^%!5jqj2v#9zMX&+E=#EXV)x<8q;SZ7GN<1#a0i=i^*a?D-@vGo9 z19-OyR%sHFFjQwz>>NaZzcCT_B8Z}rPh;yv1aBaC3&A@GevJTsu^>K1@CkzR2*%;C zi3py@$8u~fK(GWs1ATJUgy2;K zmr^((p!|);PcaL@Yy_1E79d!PfD-#EY(b&E92CK(hM)(*Aq00KxC;TUdvPDMe7Wm3 zZe88jRcfNgF=z~N8~Le_k?W$R)-$=K#l0p}!X zAd{1@3EoYbc-tmv+_X*8&DEyqrfJfo_t$QdrfC~~yJ^~{+ji-0cmHobW66}o=xOH7 zoA=(ldGqGYjK27|cJT>q!VME9jI-h2+Uqt3pE)<7fQc`Z7qS#*e0;b(ve37X*-)Mk 
zUKFYDRYWR%mCUA3)2mLii`9cR-(r1nR}G&y!{%F}FV$Tam+?tFcThXce9QF}JWpTA zCjz@_(8ecDx1H8bYrfU!O?21kOS;zZ$+K*VdaYiouv2(G)i0*%&}C}3=BpoRm^RXI z1;36@_pR4Ux;F3`d?ug8XWwauVjfj5;B&efLW~#QsX=*tI5AnpL1~&XM+rM zwfgGQOm}r{=5yEB^sI|pAZ=c^c2*P9c~R7**QQ1Zir3iqxA}ZtI>3Be`5WBJ%TU_J z%lX0qn{PW`#4GUKtnc8J7Z|VN)dLRSPW~NU!At=CKD|ZX+qIvs`b zf}KzIowoUI+-T!vh)EdfM(g0L(hR9+>qbn1jG<;0?euj9~n{kvBo{L;Nj$ zmL8a4;~Sy=Ccb&V>1*TPP4O*!D?|kO+k883Mv3zsd?z~S{2jiF?*^t_uWDz!h2Ma3 zhbr&kdr|J>`}lrPhPaR4i0>|a3Fun+0kn1N-qSYSuP-^x`1klB9vEP!@#hQk@ADw% zpos7v@D85pL@}y}m+;Um8{fwBc}cs2ckyoO;*0Sg@(7QDD$Zj(4l#%MkGR1TzzF_h z-ouZeWbmKxUfzdNLSN4pV<{HOdRKLt!L|5V790_kZ! zfNCGViQf!@e*QClE58kxqx^P$2AE^~4t^)TkMn=y=lI=dIl(2r2bdK9IlqtJ56q|d zy}IY(Nq+Aj1IGvWgTS5Q=lMgRI?XTef57(uf0#dl@0<9$e31VmC~oGd_xL~YM^U_m z|AIfpKZDY({C)m7e*&f3_z?eRbhw@Wl7EhW9+)%y1O6m`3Z*;vulUpa3n<;mpW(xx z`ZWI{e-__&@n7@j_*c<#mj8x-jsFWu=kyeR;WWeayvScd{cip;e+9Ji5dV;Woqq$U zd-OS7_xkPwydPw*@^1q70Dp~t3uF)Sf90>^`(FJ4{XzX+yG^s{bNmnB^B_L=>N`){ z&N>C>Eb7+BT|7T%>w3s{for>M-b?ak=a~{OGbeT!$wVmZ@6Z!Qs9*PLW>#CQ*B^={ z&8+rNBB~oQ*Yzx$F2Ct|q1KByb$N4TXDp&ucEkflA`q^u2nK?kdSxOef}NF}$+pU5 zI3WVzzC_1f;raa`}E%xTFY-L@5DcX@N47%yA*ZAM6Z4tiE3U z+U=3jy|gS%qe_a6rNhr;7qi(!23u$qRi+~hbuu`V$v=$i$Vux|CV9?$RwvKo+{3D+XZ*Cv z)g;%IjG|+=SWBGjrk#g`=}72CLev5?hF|;mO4cOr8~^2sTx!Y#sHG7JcBR^%Q?ekU z8%Cf*_Zr?jv6LEWvvQpBJmcuiCgat83})^+nX(vZOLZBMg1J) z4q^!v-SKcB(H;{K(`EEw$|ItQ(9TFVM(=fs2C8N0(H?(KK$vzEOh-JBkUMj;#*CmX zw}KtUTt2`K%CfwLxe4?eYq7ljXnH9xKezYlreEb9Za4^}*CG0;?I?h$@q{2IKvD-x zL|~k<$oD2UFpn&m5^^hr zoMNlIZ_1pdi$H0*jbuD7;2R@j6So5^ZY6LVfwU|orcYj)a{X$>Ml;8^gd%z@nP?Ou zCd3(#d+nBiTt=uPnv9svb`ikrW-yV-{98AX!+5hq7YXPFzvY@QPH_tiKSd5sJ61R11LV7w!`p*L zxS0i$$HF}bn95+jtf+p(4=>L&74_(otHEtwdWeUJ z(Sro;A&?$~(*Km~n6YTnwFaN|x^$QHxE}zoui|6SDt2pdH@kSiCad>NmLJc!Z{~7f zP1ZhywI7Ni(no_MZcrv4nOR?<)c;4&Sn8+}sbgkE^h9Tj8?T{qC8$0&JYm*4Hg8mm zA-k~Hcblw7d}fqOnT*d~H{OGKzqZPASzG9ld9(B7JF}~1s=iN*Ql^KoQqC^uW9Q|4 z1$)@z@}q*83j#z{3t%$8>1aT6P=b}d*ER4+`-A)AUT$uBhiP!6+3@vr!Cxwe#-&RQZ(Y^w?!Sj^`G>^(e zLz)`1Ank^j7`lQNY7$qoNnk!f} 
z3yA^IDIsPSwmr&>#6`4b>PFR-a%leY4F>hgvK0F*p_O(Yqhz|ki*|myk&K9UiO$l_ z)FN$}%w$pNeD;alQW|Gd<@2RQ)7BC#6MrOjnFjgK(o*eD)+#+%nk`o?xc9m|qVW*D z%hKU@@)rwc%$pC4ncdhM)Z>XzEDE1fX31nW+grl^XL!B$1IGR=OUw7OzsQs2o4unX zP*%AltsDdbhtT6OOB>~jkIGNVm$ON-d||bFD#_Qxe@VXZVYWcp7j0>I9w z*hgkzux9mj9&}Ng=w&vqUz_Ia{ZBLF%dul!yK%6G$pzNfFddHnQ`~b)o^&{Nd8Q zh&uJl-l$WCvWxeK_ryq`8*4k%6EZ?=SiQ=5X!;OLo$cWmGMu#eM=ZB>`K{ZEiK!wc zHlm7zBj<+)s?n;!I80|K(qZ&l92OIan>KtE7}Qi&TdEbVD0eJFQNEZxQ+BTS+q6dT zgbPk0)&wruRY@LS87iowqOyGrN~S&7vlsy*Vsyrih*zY2Rq0%1&a@g#EFv*VpaNxb z>#AzDRvusVQjQvjI#|`nO{)v5imz}-;$2JSb<}qPfkCRKjWNRN{?%T#LB70tb(2~T z%p8Ya*9MRgTCACU!hv-925LJJ_h{zXzY*A?a}j~$J$;<7wmH**z*R`DzW z8upkGj|V3>02DAx#xspaCXA{ZXOkj)rT7A(LC0>b3`+br+T~9Q- z2&iaqfr+Wv*)n&A`T^R~RZP zrTDc-AC=ms8c)V-KvgW_EQBQ$i#lmw|1EFW)QnTnqnpZDzIb|->|M_-R{Pg-TsEnyTn)I%+1BA%fzpS>`_l74ju|7%xoR!A6+0- zP%dR1aC5Mo2ilPMCxKS(rmUid^9XTylR(;A)cWlwobsisoF_~T**0wT<~Y5VPN5VV z#RfvFjI$dh)1mk3LDMN{;fUv{t(8kMI+%jEV8DRV+3#`UmK5sXEUmleju@_jghtJm30!4b6`bATBr#O{Z=sU6jNqLF2C362luUQ09*+By zVkBd@nOe)_bImK*S^0z0`JYDOYBy_sgn%Q>5qmEvb#rL*)iucRJY82^QBiFn%*N?;x;~fbvud zzX%dn$1w;t-LTB1zRa0a|@X=GDj>P{siu84Tin1-^uB+GHVXQPEOD z&^|9d*QjMQy5S;jR7Xo5X<_N3ba(_+K|K`4>Ie`~mP*1vb(Kp~ zYSm6oL$hYn#=3#hHC=srIE+;8%d+*x zPv?24B^xnZ7wSUUqfY0OT8oR*&Y-eY#2{R3BL-!txKi$ItyuB`SimMKu_I@|S=AYe z#*+#0IE{l$X7zcAN@_Bnm(R7XWY5b_TAy4^%6i9H3*j?F@d|056O2WJ*yW=5m@Zt@ z<7QM%ujy_JMT6LN65{vrX9wmoUHSC=k03Ci$>N>=?sh=+I^Ut+z{obNFjm?}U z*ZHT(pZh1WcjQO@f2|;9;wfsTFm%a5RXUblB6qu_&MMbYDVrJ!x0%>i^O%pU%Wp2e1r z(nE^?4Xd8OUI4mS=SLLm6mzd;wydLsl@7Od%x4FPg*OXHd^i;Mbevb(gXg@`LW@a1HWsc<0E) zx;6Ju3Rz+tD^^{iJPhtbc5Cw>W~SB~w|I>lM!k_02b6Eog$}~Ifew~p-$coDBWxIP z+OR1AV5wR=+y$a~n*faufomC!Cyx*$(UP!+aAop^$U@{HCt?NC9sNz^*!5zWU9$|c zfLOWk8W-uP_8V$BE|+s+^T(|I9kB{LTb+n~xOJ2h#!7}YCn#^aVn?{lI#NJsww_8G z2s99AB#=36Ux-(;O|t%uDYBp>`|iUZ>!ZxovL13?<(`O^c-4ygg%_N#(v}m$w`e*u zxKfqIRBXrYGZ8h~eP*k_Ia(DH3$6&1hs7g>3yH;M0$T}O#b=|eG^VkQ!yAp)oYQY3 z6RSz1NaNKilo@flo!@t4^7qk*y`2)#W()s$`E=hbmM7onyFR5>+32amN>^1Ld@J!( 
zBTy-?IzFh?xQZH-SEwyO&HOZo#a0(c2nr5nRx}pHU1dx;gLs`fyhjv{o{)aTbRo>s z?!5*md7R}bS@g5N9^>aOeGi}5gB-zZ=+6+YXx}(e6>2v}W zLZ%BZQP|9|E<#owp@rMjZMbR2I;2Fy=vF>$vMwYXP)E7B$%1Oqaa|OQK|9k9?ZkIz z=ovF7e|Gdy7L_ND{g&M-lxO9JZ_5 zO>O&noU>>Ga4m#0fHjCEs@@q0w~L?1>Ju}X)PXHV*sPJa^z>l`3t*6#rUS_k5wf`W z1EDk3JbPjpJ2w2;wodINRZDK5q1Ee zKMUB3r5y{gHK@0p&8MbXj}pg;?$A~8soDncLo5^|pSPTb z-cmPEx_n6Mcho{;vrw$AsUmurwB~3tcHrj;;s;FarF8?@^67zsRW2}qcFmRz{y!E! z&1BK9jT_e_?vdJnUYMdW{QuJB%>z5Em!oTPmLCtys;h^QOh>oghwDi79*q4D%8Ky> z)M+$**QDlH9m-w8x}jbzTW*@W<`o(y0&)kvc~E!}Dp#k&qqvXw+i^W=W+h^{o(zuM zD}EJZ-dyo1`Rq-_tW26W6)jUiTP5|%Q3i>TI^R(s_vLix$SMM~7)Me=pwM*|&`5c*0?KsSIa?1WL{V(v| z8CINCL(7Wdp$W-ZM(|Ndf!aEW3njM3;bft%BlxH+h2oNEBHk*JsZ4N%Y`v`}wUm%5 zo~b1CMyjf_`YkA7r3I+xNel!FInqI(%G$~H(%^0a@MTk6@(064H8mT`(XkIC6Nn({ zw4bIJN%SpKk*0Fu?TzeV*>d|+DGy1a;2oM&iiP)F-{s*A`gM;<+{uok(R&Z9KEj0H|~6OmWrUR|C#vw&SxJI{+}iVvtP#VJ-m z5}sTmTo2+7#mXey?IISTBe&^HBB@w`ZI-0cwRWImj581xSI9yS@C8_N)?F+Uk3mYX ziF~4^3Bl-!3?2DBWT*kt2(dxLNQ}D1$6l(2Mz<%KBaoNw@US7J_MAH#QYBz&J>Wd? z_d^vc(RoRRB2>qg=s_oZEmU7k7!D^cI#un>P*GE^+XNCa9lm}_Uz+$Ektz3NVhXWx zrNj)j-!V!9)Yf__AvFaf>)d2$z!4GIAYjEfKpg&_#Tn zIzMUk`Z|3%kc0z8Af?1uvNF(3s%Iu)2A!7f4(Th&?O8MZcZ+Rg!|edgO-+{3n+XF! zH2fHqmEQIy^$iRCgM9DPQybqUJ)b17kpLZlt-Fsds=h>^0swntC;i;Z`fqEhLF zx&kVctM4*W$~T<2`;7OEq_Ob;mOmC5gqsuaMCPZTKG4IAp`MGZwY-;9Qvq)~aUDkz zb4cPOl4n%#*pVpho`wpn<^&6i-wFu)PE4gCDsY-9w_iOt`2FHMb$2vv-qt8W#PeC= zsRGG1D&-OwP2d-PWZ}g!&gzhxJsN4`62PY23Ab z$L>am*wNf#<{Em!9~biY*=pQhqSBm@9dl^0V_iu>x;Ra8-_hQLw+iwM*aXd=)` zU. 
- """ - - def __init__(self, c1): - super().__init__() - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) - - def forward(self, x): - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x - - -class MetaAconC(nn.Module): - r""" ACON activation (activate or not) - MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network - according to "Activate or Not: Learning Customized Activation" . - """ - - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r - super().__init__() - c2 = max(r, c1 // r) - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) - # self.bn1 = nn.BatchNorm2d(c2) - # self.bn2 = nn.BatchNorm2d(c1) - - def forward(self, x): - y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 - # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable - beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/utils/augmentations.py b/utils/augmentations.py deleted file mode 100644 index 7c8e0bc..0000000 --- a/utils/augmentations.py +++ /dev/null @@ -1,399 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import math -import random - -import cv2 -import numpy as np -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as TF - -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy -from utils.metrics import bbox_ioa - -IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean -IMAGENET_STD = 
0.229, 0.224, 0.225 # RGB standard deviation - - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self, size=640): - self.transform = None - prefix = colorstr('albumentations: ') - try: - import albumentations as A - check_version(A.__version__, '1.0.3', hard=True) # version requirement - - T = [ - A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), - A.Blur(p=0.01), - A.MedianBlur(p=0.01), - A.ToGray(p=0.01), - A.CLAHE(p=0.01), - A.RandomBrightnessContrast(p=0.0), - A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)] # transforms - self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) - except ImportError: # package not installed, skip - pass - except Exception as e: - LOGGER.info(f'{prefix}{e}') - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): - # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std - return TF.normalize(x, mean, std, inplace=inplace) - - -def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): - # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= x * std + mean - for i in range(3): - x[:, i] = x[:, i] * std[i] + mean[i] - return x - - -def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - # HSV color-space augmentation - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - dtype = im.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - - -def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def replicate(im, labels): - # Replicate labels - h, w = im.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return im, labels - - -def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize 
and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, ratio, (dw, dh) - - -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = 
np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if 
perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return im, targets - - -def copy_paste(im, labels, segments, p=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if p and n: - h, w, c = im.shape # height, width, channels - im_new = np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(p * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - - return im, labels, segments - - -def cutout(im, labels, p=0.5): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - if random.random() < p: - h, w = im.shape[:2] - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) # create random masks - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, 
random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def mixup(im, labels, im2, labels2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - return im, labels - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def classify_albumentations( - augment=True, - size=224, - scale=(0.08, 1.0), - ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): - # YOLOv5 classification Albumentations (optional, only used if package is installed) - prefix = colorstr('albumentations: ') - try: - import albumentations as A - from albumentations.pytorch import ToTensorV2 - check_version(A.__version__, '1.0.3', hard=True) # version requirement - if augment: # Resize and crop - T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] - if auto_aug: - # TODO: implement AugMix, AutoAug & 
RandAug in albumentation - LOGGER.info(f'{prefix}auto augmentations are currently not supported') - else: - if hflip > 0: - T += [A.HorizontalFlip(p=hflip)] - if vflip > 0: - T += [A.VerticalFlip(p=vflip)] - if jitter > 0: - color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue - T += [A.ColorJitter(*color_jitter, 0)] - else: # Use fixed crop for eval set (reproducibility) - T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] - T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) - return A.Compose(T) - - except ImportError: # package not installed, skip - LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') - except Exception as e: - LOGGER.info(f'{prefix}{e}') - - -def classify_transforms(size=224): - # Transforms to apply if albumentations not installed - assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' - # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) - return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) - - -class LetterBox: - # YOLOv5 LetterBox class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) - def __init__(self, size=(640, 640), auto=False, stride=32): - super().__init__() - self.h, self.w = (size, size) if isinstance(size, int) else size - self.auto = auto # pass max size integer, automatically solve for short side using stride - self.stride = stride # used with auto - - def __call__(self, im): # im = np.array HWC - imh, imw = im.shape[:2] - r = min(self.h / imh, self.w / imw) # ratio of new/old - h, w = round(imh * r), round(imw * r) # resized image - hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w - top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) - im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) - im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) - return im_out - - -class CenterCrop: - # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) - def __init__(self, size=640): - super().__init__() - self.h, self.w = (size, size) if isinstance(size, int) else size - - def __call__(self, im): # im = np.array HWC - imh, imw = im.shape[:2] - m = min(imh, imw) # min dimension - top, left = (imh - m) // 2, (imw - m) // 2 - return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) - - -class ToTensor: - # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) - def __init__(self, half=False): - super().__init__() - self.half = half - - def __call__(self, im): # im = np.array HWC in BGR order - im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous - im = torch.from_numpy(im) # to torch - im = im.half() if self.half else im.float() # uint8 to fp16/32 - im /= 255.0 # 0-255 to 0.0-1.0 - return im diff --git a/utils/autoanchor.py b/utils/autoanchor.py deleted file mode 100644 index 7e7e998..0000000 --- a/utils/autoanchor.py +++ /dev/null @@ -1,169 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -AutoAnchor utils -""" - -import random - -import numpy as np -import torch -import yaml -from tqdm import tqdm - -from utils import TryExcept -from utils.general import LOGGER, colorstr - -PREFIX = colorstr('AutoAnchor: ') - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da and (da.sign() != ds.sign()): # same order - LOGGER.info(f'{PREFIX}Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - - -@TryExcept(f'{PREFIX}ERROR: ') -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1 / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold - 
bpr = (best > 1 / thr).float().mean() # best possible recall - return bpr, aat - - stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides - anchors = m.anchors.clone() * stride # current anchors - bpr, aat = metric(anchors.cpu().view(-1, 2)) - s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' - if bpr > 0.98: # threshold to recompute - LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') - else: - LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') - na = m.anchors.numel() // 2 # number of anchors - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(anchors)[0] - if new_bpr > bpr: # replace anchors - anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchors[:] = anchors.clone().view_as(m.anchors) - check_anchor_order(m) # must be in pixel-space (not grid-space) - m.anchors /= stride - s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' - else: - s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' - LOGGER.info(s) - - -def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - dataset: path to data.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.autoanchor import *; _ = kmean_anchors() - """ - from scipy.cluster.vq import kmeans - - npr = np.random - thr = 1 / thr - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1 / r).min(2)[0] # ratio metric - # x = wh_iou(wh, 
torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k, verbose=True): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ - f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ - f'past_thr={x[x > thr].mean():.3f}-mean: ' - for x in k: - s += '%i,%i, ' % (round(x[0]), round(x[1])) - if verbose: - LOGGER.info(s[:-2]) - return k - - if isinstance(dataset, str): # *.yaml file - with open(dataset, errors='ignore') as f: - data_dict = yaml.safe_load(f) # model dict - from utils.dataloaders import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') - wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels - # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans init - try: - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - assert n <= len(wh) # apply overdetermined constraint - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar - except Exception: - LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching 
strategies from kmeans to random init') - k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init - wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) - k = print_results(k, verbose=False) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.savefig('wh.png', dpi=200) - - # Evolve - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' - if verbose: - print_results(k, verbose) - - return print_results(k).astype(np.float32) diff --git a/utils/autobatch.py b/utils/autobatch.py deleted file mode 100644 index bdeb91c..0000000 --- a/utils/autobatch.py +++ /dev/null @@ -1,72 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Auto-batch utils -""" - -from copy import deepcopy - -import numpy as np -import torch - -from utils.general import LOGGER, colorstr -from utils.torch_utils import profile - - -def check_train_batch_size(model, imgsz=640, amp=True): - # Check YOLOv5 training batch size - with torch.cuda.amp.autocast(amp): - return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size - - -def autobatch(model, imgsz=640, 
fraction=0.8, batch_size=16): - # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory - # Usage: - # import torch - # from utils.autobatch import autobatch - # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) - # print(autobatch(model)) - - # Check device - prefix = colorstr('AutoBatch: ') - LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') - device = next(model.parameters()).device # get model device - if device.type == 'cpu': - LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') - return batch_size - if torch.backends.cudnn.benchmark: - LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') - return batch_size - - # Inspect CUDA memory - gb = 1 << 30 # bytes to GiB (1024 ** 3) - d = str(device).upper() # 'CUDA:0' - properties = torch.cuda.get_device_properties(device) # device properties - t = properties.total_memory / gb # GiB total - r = torch.cuda.memory_reserved(device) / gb # GiB reserved - a = torch.cuda.memory_allocated(device) / gb # GiB allocated - f = t - (r + a) # GiB free - LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') - - # Profile batch sizes - batch_sizes = [1, 2, 4, 8, 16] - try: - img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] - results = profile(img, model, n=3, device=device) - except Exception as e: - LOGGER.warning(f'{prefix}{e}') - - # Fit a solution - y = [x[2] for x in results if x] # memory [2] - p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit - b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - if None in results: # some sizes failed - i = results.index(None) # first fail index - if b >= batch_sizes[i]: # y intercept above failure point - b = batch_sizes[max(i - 1, 0)] # select prior safe point - if b < 1 or b > 1024: # b outside 
of safe range - b = batch_size - LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') - - fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted - LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') - return b diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh deleted file mode 100644 index c319a83..0000000 --- a/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/utils/aws/resume.py b/utils/aws/resume.py deleted file mode 100644 index b21731c..0000000 --- a/utils/aws/resume.py +++ /dev/null @@ -1,40 +0,0 @@ -# Resume all interrupted trainings in yolov5/ dir including DDP trainings -# Usage: $ python utils/aws/resume.py - -import os -import sys -from pathlib import Path - -import torch -import yaml - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[2] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -port = 0 # --master_port -path = Path('').resolve() -for last in 
path.rglob('*/**/last.pt'): - ckpt = torch.load(last) - if ckpt['optimizer'] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: - opt = yaml.safe_load(f) - - # Get device count - d = opt['device'].split(',') # devices - nd = len(d) # number of devices - ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' - else: # single-GPU - cmd = f'python train.py --resume {last}' - - cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh deleted file mode 100644 index 5fc1332..0000000 --- a/utils/aws/userdata.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html -# This script will run only once on first instance start (for a re-start script see mime.sh) -# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir -# Use >300 GB SSD - -cd home/ubuntu -if [ ! -d yolov5 ]; then - echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 - cd yolov5 - bash data/scripts/get_coco.sh && echo "COCO done." & - sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & - python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & - wait && echo "All tasks done." # finish background tasks -else - echo "Running re-start script." # resume interrupted runs - i=0 - list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' - while IFS= read -r id; do - ((i++)) - echo "restarting container $i: $id" - sudo docker start $id - # sudo docker exec -it $id python train.py --resume # single-GPU - sudo docker exec -d $id python utils/aws/resume.py # multi-scenario - done <<<"$list" -fi diff --git a/utils/callbacks.py b/utils/callbacks.py deleted file mode 100644 index 166d893..0000000 --- a/utils/callbacks.py +++ /dev/null @@ -1,76 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Callback utils -""" - -import threading - - -class Callbacks: - """" - Handles all registered callbacks for YOLOv5 Hooks - """ - - def __init__(self): - # Define the available callbacks - self._callbacks = { - 'on_pretrain_routine_start': [], - 'on_pretrain_routine_end': [], - 'on_train_start': [], - 'on_train_epoch_start': [], - 'on_train_batch_start': [], - 'optimizer_step': [], - 'on_before_zero_grad': [], - 'on_train_batch_end': [], - 'on_train_epoch_end': [], - 'on_val_start': [], - 'on_val_batch_start': [], - 'on_val_image_end': [], - 'on_val_batch_end': [], - 'on_val_end': [], - 'on_fit_epoch_end': [], # fit = train + val - 'on_model_save': [], - 'on_train_end': [], - 'on_params_update': [], - 'teardown': [],} - self.stop_training = False # set True to interrupt training - - def register_action(self, hook, name='', callback=None): - """ - Register a new action to a callback hook - - Args: - hook: The callback hook name to register the action to - name: The name of the action for later reference - callback: The callback to fire - """ - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - assert callable(callback), f"callback '{callback}' is not callable" - self._callbacks[hook].append({'name': name, 'callback': callback}) - - def get_registered_actions(self, hook=None): - """" - Returns all the registered actions by callback hook - - Args: - hook: The name of the hook to check, defaults to all - """ - return self._callbacks[hook] if hook 
else self._callbacks - - def run(self, hook, *args, thread=False, **kwargs): - """ - Loop through the registered actions and fire all callbacks on main thread - - Args: - hook: The name of the hook to check, defaults to all - args: Arguments to receive from YOLOv5 - thread: (boolean) Run callbacks in daemon thread - kwargs: Keyword Arguments to receive from YOLOv5 - """ - - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - for logger in self._callbacks[hook]: - if thread: - threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() - else: - logger['callback'](*args, **kwargs) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile deleted file mode 100644 index be5c2fb..0000000 --- a/utils/docker/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference - -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.09-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx - -# Install pip packages -COPY requirements.txt . -RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ - --extra-index-url https://download.pytorch.org/whl/cu113 - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . 
/usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - -# Set environment variables -ENV OMP_NUM_THREADS=8 - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t - -# Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t - -# Kill all -# sudo docker kill $(sudo docker ps -q) - -# Kill all image-based -# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) - -# DockerHub tag update -# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew - -# Clean up -# docker system prune -a --volumes - -# Update Ubuntu drivers -# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ - -# DDP test -# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 - -# GCP VM from Image -# docker.io/ultralytics/yolov5:latest diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 deleted file mode 100644 index 6e8ff77..0000000 --- a/utils/docker/Dockerfile-arm64 +++ /dev/null @@ -1,41 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . 
&& sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu deleted file mode 100644 index d6fac64..0000000 --- a/utils/docker/Dockerfile-cpu +++ /dev/null @@ -1,40 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments - -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev -# RUN alias python=python3 - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ - --extra-index-url https://download.pytorch.org/whl/cpu - -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/downloads.py b/utils/downloads.py deleted file mode 100644 index 60417c1..0000000 --- a/utils/downloads.py +++ /dev/null @@ -1,189 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Download utils -""" - -import logging -import os -import platform -import subprocess -import time -import urllib -from pathlib import Path -from zipfile import ZipFile - -import requests -import torch - - -def is_url(url, check=True): - # Check if string is URL and check if URL exists - try: - url = str(url) - result = urllib.parse.urlparse(url) - assert all([result.scheme, result.netloc]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online - except (AssertionError, urllib.request.HTTPError): - return False - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def url_getsize(url='https://ultralytics.com/images/bus.jpg'): - # Return downloadable file size in bytes - response = requests.head(url, allow_redirects=True) - return int(response.headers.get('content-length', -1)) - - -def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): - # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes - from utils.general import LOGGER - - file = Path(file) - assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" - try: # url1 - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) - assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check - except 
Exception as e: # url2 - if file.exists(): - file.unlink() # remove partial downloads - LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < min_bytes: # check - if file.exists(): - file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") - LOGGER.info('') - - -def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. - from utils.general import LOGGER - - def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) - if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 - response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api - return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets - - file = Path(str(file).strip().replace("'", '')) - if not file.exists(): - # URL specified - name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. - if str(file).startswith(('http:/', 'https:/')): # download - url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ - file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
- if Path(file).is_file(): - LOGGER.info(f'Found {url} locally at {file}') # file already exists - else: - safe_download(file=file, url=url, min_bytes=1E5) - return file - - # GitHub assets - assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default - try: - tag, assets = github_assets(repo, release) - except Exception: - try: - tag, assets = github_assets(repo) # latest release - except Exception: - try: - tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] - except Exception: - tag = release - - file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) - if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') - - return str(file) - - -def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - if file.exists(): - file.unlink() # remove existing file - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Error check - if r != 0: - if file.exists(): - file.unlink() # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... ', end='') - ZipFile(file).extractall(path=file.parent) # unzip - file.unlink() # remove zip - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - - -# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- -# -# -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = 
storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md deleted file mode 100644 index a726acb..0000000 --- a/utils/flask_rest_api/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Flask REST API - -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are -commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API -created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). - -## Requirements - -[Flask](https://palletsprojects.com/p/flask/) is required. Install with: - -```shell -$ pip install Flask -``` - -## Run - -After Flask installation run: - -```shell -$ python3 restapi.py --port 5000 -``` - -Then use [curl](https://curl.se/) to perform a request: - -```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' -``` - -The model inference results are returned as a JSON response: - -```json -[ - { - "class": 0, - "confidence": 0.8900438547, - "height": 0.9318675399, - "name": "person", - "width": 0.3264600933, - "xcenter": 0.7438579798, - "ycenter": 0.5207948685 - }, - { - "class": 0, - "confidence": 0.8440024257, - "height": 0.7155083418, - "name": "person", - "width": 0.6546785235, - "xcenter": 0.427829951, - "ycenter": 0.6334488392 - }, - { - "class": 27, - "confidence": 0.3771208823, - "height": 0.3902671337, - "name": "tie", - "width": 0.0696444362, - "xcenter": 0.3675483763, - "ycenter": 0.7991207838 - }, - { - "class": 27, - "confidence": 0.3527112305, - "height": 0.1540903747, - "name": "tie", - "width": 0.0336618312, - "xcenter": 0.7814827561, - "ycenter": 0.5065554976 - } -] -``` - -An example python script to 
perform inference using [requests](https://docs.python-requests.org/en/master/) is given -in `example_request.py` diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py deleted file mode 100644 index 773ad89..0000000 --- a/utils/flask_rest_api/example_request.py +++ /dev/null @@ -1,19 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Perform test request -""" - -import pprint - -import requests - -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" - -# Read image -with open(IMAGE, "rb") as f: - image_data = f.read() - -response = requests.post(DETECTION_URL, files={"image": image_data}).json() - -pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py deleted file mode 100644 index 8482435..0000000 --- a/utils/flask_rest_api/restapi.py +++ /dev/null @@ -1,48 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run a Flask REST API exposing one or more YOLOv5s models -""" - -import argparse -import io - -import torch -from flask import Flask, request -from PIL import Image - -app = Flask(__name__) -models = {} - -DETECTION_URL = "/v1/object-detection/" - - -@app.route(DETECTION_URL, methods=["POST"]) -def predict(model): - if request.method != "POST": - return - - if request.files.get("image"): - # Method 1 - # with request.files["image"] as f: - # im = Image.open(io.BytesIO(f.read())) - - # Method 2 - im_file = request.files["image"] - im_bytes = im_file.read() - im = Image.open(io.BytesIO(im_bytes)) - - if model in models: - results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") - parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to 
run, i.e. --model yolov5n yolov5s') - opt = parser.parse_args() - - for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) - - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/google_app_engine/Dockerfile b/utils/google_app_engine/Dockerfile deleted file mode 100644 index 0155618..0000000 --- a/utils/google_app_engine/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM gcr.io/google-appengine/python - -# Create a virtualenv for dependencies. This isolates these packages from -# system-level packages. -# Use -p python3 or -p python3.7 to select python version. Default is version 2. -RUN virtualenv /env -p python3 - -# Setting these environment variables are the same as running -# source /env/bin/activate. -ENV VIRTUAL_ENV /env -ENV PATH /env/bin:$PATH - -RUN apt-get update && apt-get install -y python-opencv - -# Copy the application's requirements.txt and run pip to install all -# dependencies into the virtualenv. -ADD requirements.txt /app/requirements.txt -RUN pip install -r /app/requirements.txt - -# Add the application source code. -ADD . /app - -# Run a WSGI server to serve the application. gunicorn must be declared as -# a dependency in requirements.txt. 
-CMD gunicorn -b :$PORT main:app diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt deleted file mode 100644 index 42d7ffc..0000000 --- a/utils/google_app_engine/additional_requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -# add these requirements in your app on top of the existing ones -pip==21.1 -Flask==1.0.2 -gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml deleted file mode 100644 index 5056b7c..0000000 --- a/utils/google_app_engine/app.yaml +++ /dev/null @@ -1,14 +0,0 @@ -runtime: custom -env: flex - -service: yolov5app - -liveness_check: - initial_delay_sec: 600 - -manual_scaling: - instances: 1 -resources: - cpu: 1 - memory_gb: 4 - disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py deleted file mode 100644 index bc8dd76..0000000 --- a/utils/loggers/__init__.py +++ /dev/null @@ -1,404 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Logging utils -""" - -import os -import warnings -from pathlib import Path - -import pkg_resources as pkg -import torch -from torch.utils.tensorboard import SummaryWriter - -from utils.general import LOGGER, colorstr, cv2 -from utils.loggers.clearml.clearml_utils import ClearmlLogger -from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_labels, plot_results -from utils.torch_utils import de_parallel - -LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv('RANK', -1)) - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: - try: - wandb_login_success = wandb.login(timeout=30) - except wandb.errors.UsageError: # known non-TTY terminal issue - wandb_login_success = False - if not wandb_login_success: - wandb = None -except 
(ImportError, AssertionError): - wandb = None - -try: - import clearml - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - -try: - if RANK not in [0, -1]: - comet_ml = None - else: - import comet_ml - - assert hasattr(comet_ml, '__version__') # verify package import not local dir - from utils.loggers.comet import CometLogger - -except (ModuleNotFoundError, ImportError, AssertionError): - comet_ml = None - - -class Loggers(): - # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): - self.save_dir = save_dir - self.weights = weights - self.opt = opt - self.hyp = hyp - self.plots = not opt.noplots # plot results - self.logger = logger # for printing results to console - self.include = include - self.keys = [ - 'train/box_loss', - 'train/obj_loss', - 'train/cls_loss', # train loss - 'metrics/precision', - 'metrics/recall', - 'metrics/mAP_0.5', - 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', - 'val/obj_loss', - 'val/cls_loss', # val loss - 'x/lr0', - 'x/lr1', - 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] - for k in LOGGERS: - setattr(self, k, None) # init empty logger dictionary - self.csv = True # always log to csv - - # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) - if not clearml: - prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" - self.logger.info(s) - if not comet_ml: - prefix = colorstr('Comet: ') - s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" - self.logger.info(s) - # TensorBoard - s = self.save_dir - if 'tb' in 
self.include and not self.opt.evolve: - prefix = colorstr('TensorBoard: ') - self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(s)) - - # W&B - if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None - self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - # self.logger.warning(s) - else: - self.wandb = None - - # ClearML - if clearml and 'clearml' in self.include: - self.clearml = ClearmlLogger(self.opt, self.hyp) - else: - self.clearml = None - - # Comet - if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] - self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) - - else: - self.comet_logger = CometLogger(self.opt, self.hyp) - - else: - self.comet_logger = None - - @property - def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided - data_dict = None - if self.clearml: - data_dict = self.clearml.data_dict - if self.wandb: - data_dict = self.wandb.data_dict - if self.comet_logger: - data_dict = self.comet_logger.data_dict - - return data_dict - - def on_train_start(self): - if self.comet_logger: - self.comet_logger.on_train_start() - - def on_pretrain_routine_start(self): - if self.comet_logger: - self.comet_logger.on_pretrain_routine_start() - - def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end 
- if self.plots: - plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks - if self.comet_logger: - self.comet_logger.on_pretrain_routine_end(paths) - - def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) - # Callback runs on train batch end - # ni: number integrated batches (since train start) - if self.plots: - if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename - plot_images(imgs, targets, paths, f) - if ni == 0 and self.tb and not self.opt.sync_bn: - log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) - if ni == 10 and (self.wandb or self.clearml): - files = sorted(self.save_dir.glob('train*.jpg')) - if self.wandb: - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Mosaics') - - if self.comet_logger: - self.comet_logger.on_train_batch_end(log_dict, step=ni) - - def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end - if self.wandb: - self.wandb.current_epoch = epoch + 1 - - if self.comet_logger: - self.comet_logger.on_train_epoch_end(epoch) - - def on_val_start(self): - if self.comet_logger: - self.comet_logger.on_val_start() - - def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end - if self.wandb: - self.wandb.val_one_image(pred, predn, path, names, im) - if self.clearml: - self.clearml.log_image_with_boxes(path, pred, names, im) - - def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): - if self.comet_logger: - self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, 
ap_class, confusion_matrix): - # Callback runs on val end - if self.wandb or self.clearml: - files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') - - if self.comet_logger: - self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) - - def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch - x = dict(zip(self.keys, vals)) - if self.csv: - file = self.save_dir / 'results.csv' - n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) - elif self.clearml: # log to ClearML if TensorBoard not used - for k, v in x.items(): - title, series = k.split('/') - self.clearml.task.get_logger().report_scalar(title, series, v, epoch) - - if self.wandb: - if best_fitness == fi: - best_results = [epoch] + vals[3:7] - for i, name in enumerate(self.best_keys): - self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary - self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) - - if self.clearml: - self.clearml.current_epoch_logged_images = set() # reset epoch image limit - self.clearml.current_epoch += 1 - - if self.comet_logger: - self.comet_logger.on_fit_epoch_end(x, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event - if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: - if self.wandb: - self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - if self.clearml: - 
self.clearml.task.update_output_model(model_path=str(last), - model_name='Latest Model', - auto_delete_file=False) - - if self.comet_logger: - self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) - - def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. saving best model - if self.plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter - self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") - - if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) - # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model - if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), - type='model', - name=f'run_{self.wandb.wandb_run.id}_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - - if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model', - auto_delete_file=False) - - if self.comet_logger: - final_results = dict(zip(self.keys[3:10], results)) - self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) - - def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment - if self.wandb: - self.wandb.wandb_run.config.update(params, allow_val_change=True) - if self.comet_logger: - self.comet_logger.on_params_update(params) - - -class GenericLogger: - """ - YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
- Arguments - opt: Run arguments - console_logger: Console logger - include: loggers to include - """ - - def __init__(self, opt, console_logger, include=('tb', 'wandb')): - # init default loggers - self.save_dir = Path(opt.save_dir) - self.include = include - self.console_logger = console_logger - self.csv = self.save_dir / 'results.csv' # CSV logger - if 'tb' in self.include: - prefix = colorstr('TensorBoard: ') - self.console_logger.info( - f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(self.save_dir)) - - if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, - config=opt) - else: - self.wandb = None - - def log_metrics(self, metrics, epoch): - # Log metrics dictionary to all loggers - if self.csv: - keys, vals = list(metrics.keys()), list(metrics.values()) - n = len(metrics) + 1 # number of cols - s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header - with open(self.csv, 'a') as f: - f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in metrics.items(): - self.tb.add_scalar(k, v, epoch) - - if self.wandb: - self.wandb.log(metrics, step=epoch) - - def log_images(self, files, name='Images', epoch=0): - # Log images to all loggers - files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path - files = [f for f in files if f.exists()] # filter by exists - - if self.tb: - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) - - def log_graph(self, model, imgsz=(640, 640)): - # Log model graph to all loggers - if self.tb: - log_tensorboard_graph(self.tb, model, imgsz) - - def log_model(self, 
model_path, epoch=0, metadata={}): - # Log model to all loggers - if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) - art.add_file(str(model_path)) - wandb.log_artifact(art) - - def update_params(self, params): - # Update the paramters logged - if self.wandb: - wandb.run.config.update(params, allow_val_change=True) - - -def log_tensorboard_graph(tb, model, imgsz=(640, 640)): - # Log model graph to TensorBoard - try: - p = next(model.parameters()) # for device, type - imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) - except Exception as e: - LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') - - -def web_project_name(project): - # Convert local project name to web project name - if not project.startswith('runs/train'): - return project - suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' - return f'YOLOv5{suffix}' diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md deleted file mode 100644 index 64eef6b..0000000 --- a/utils/loggers/clearml/README.md +++ /dev/null @@ -1,222 +0,0 @@ -# ClearML Integration - -Clear|MLClear|ML - -## About ClearML - -[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. 
- -🔨 Track every YOLOv5 training run in the experiment manager - -🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool - -🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent - -🔬 Get the very best mAP using ClearML Hyperparameter Optimization - -🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving - -
-And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline! -
-
- -![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) - - -
-
- -## 🦾 Setting Things Up - -To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: - -Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! - -1. Install the `clearml` python package: - - ```bash - pip install clearml - ``` - -1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - - ```bash - clearml-init - ``` - -That's it! You're done 😎 - -
- -## 🚀 Training YOLOv5 With ClearML - -To enable ClearML experiment tracking, simply install the ClearML pip package. - -```bash -pip install clearml -``` - -This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` - -```bash -python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache -``` - -This will capture: -- Source code + uncommitted changes -- Installed packages -- (Hyper)parameters -- Model files (use `--save-period n` to save a checkpoint every n epochs) -- Console output -- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...) -- General info such as machine details, runtime, creation date etc. -- All produced plots such as label correlogram and confusion matrix -- Images with bounding boxes per epoch -- Mosaic per epoch -- Validation images per epoch -- ... - -That's a lot right? 🤯 -Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them! - -There even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! - -
- -## 🔗 Dataset Version Management - -Versioning your data separately from your code is generally a good idea and makes it easy to aqcuire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! - -![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) - -### Prepare Your Dataset - -The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default datasets are downloaded to the `../datasets` folder in relation to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: - -``` -.. -|_ yolov5 -|_ datasets - |_ coco128 - |_ images - |_ labels - |_ LICENSE - |_ README.txt -``` -But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. - -Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml files contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. - -Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`. - -``` -.. -|_ yolov5 -|_ datasets - |_ coco128 - |_ images - |_ labels - |_ coco128.yaml # <---- HERE! - |_ LICENSE - |_ README.txt -``` - -### Upload Your Dataset - -To get this dataset into ClearML as a versionned dataset, go to the dataset root folder and run the following command: -```bash -cd coco128 -clearml-data sync --project YOLOv5 --name coco128 --folder . -``` - -The command `clearml-data sync` is actually a shorthand command. 
You could also run these commands one after the other: -```bash -# Optionally add --parent if you want to base -# this version on another dataset version, so no duplicate files are uploaded! -clearml-data create --name coco128 --project YOLOv5 -clearml-data add --files . -clearml-data close -``` - -### Run Training Using A ClearML Dataset - -Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! - -```bash -python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache -``` - -
- -## 👀 Hyperparameter Optimization - -Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! - -Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically, this is basically what HPO does! - -To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager, we will essentially clone it and change its hyperparameters. - -You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. - -```bash -# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch -pip install optuna -python utils/loggers/clearml/hpo.py -``` - -![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) - -## 🤯 Remote Execution (advanced) - -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. -This is where the ClearML Agent comes into play. Check out what the agent can do here: - -- [YouTube video](https://youtu.be/MX3BrXnaULs) -- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) - -In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). 
So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. - -You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: -```bash -clearml-agent daemon --queue [--docker] -``` - -### Cloning, Editing And Enqueuing - -With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! - -🪄 Clone the experiment by right clicking it - -🎯 Edit the hyperparameters to what you wish them to be - -⏳ Enqueue the task to any of the queues by right clicking it - -![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) - -### Executing A Task Remotely - -Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! - -To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instatiated: -```python -# ... -# Loggers -data_dict = None -if RANK in {-1, 0}: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - if loggers.clearml: - loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE - # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML - data_dict = loggers.clearml.data_dict -# ... -``` -When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! - -### Autoscaling workers - -ClearML comes with autoscalers too! 
This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! - -Check out the autoscalers getting started video below. - -[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py deleted file mode 100644 index eb1c12c..0000000 --- a/utils/loggers/clearml/clearml_utils.py +++ /dev/null @@ -1,157 +0,0 @@ -"""Main Logger class for ClearML experiment tracking.""" -import glob -import re -from pathlib import Path - -import numpy as np -import yaml - -from utils.plots import Annotator, colors - -try: - import clearml - from clearml import Dataset, Task - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - - -def construct_dataset(clearml_info_string): - """Load in a clearml dataset and fill the internal data_dict with its contents. 
- """ - dataset_id = clearml_info_string.replace('clearml://', '') - dataset = Dataset.get(dataset_id=dataset_id) - dataset_root_path = Path(dataset.get_local_copy()) - - # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) - if len(yaml_filenames) > 1: - raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' - 'the dataset definition this way.') - elif len(yaml_filenames) == 0: - raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' - 'inside the dataset root path.') - with open(yaml_filenames[0]) as f: - dataset_definition = yaml.safe_load(f) - - assert set(dataset_definition.keys()).issuperset( - {'train', 'test', 'val', 'nc', 'names'} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" - - data_dict = dict() - data_dict['train'] = str( - (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None - data_dict['test'] = str( - (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None - data_dict['val'] = str( - (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None - data_dict['nc'] = dataset_definition['nc'] - data_dict['names'] = dataset_definition['names'] - - return data_dict - - -class ClearmlLogger: - """Log training runs, datasets, models, and predictions to ClearML. - - This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, - this information includes hyperparameters, system configuration and metrics, model metrics, code information and - basic data metrics and analyses. 
- - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. - """ - - def __init__(self, opt, hyp): - """ - - Initialize ClearML Task, this object will capture the experiment - - Upload dataset version to ClearML Data if opt.upload_dataset is True - - arguments: - opt (namespace) -- Commandline arguments for this run - hyp (dict) -- Hyperparameters for this run - - """ - self.current_epoch = 0 - # Keep tracked of amount of logged images to enforce a limit - self.current_epoch_logged_images = set() - # Maximum number of images to log to clearML per epoch - self.max_imgs_to_log_per_epoch = 16 - # Get the interval of epochs when bounding box images should be logged - self.bbox_interval = opt.bbox_interval - self.clearml = clearml - self.task = None - self.data_dict = None - if self.clearml: - self.task = Task.init( - project_name='YOLOv5', - task_name='training', - tags=['YOLOv5'], - output_uri=True, - auto_connect_frameworks={'pytorch': False} - # We disconnect pytorch auto-detection, because we added manual model save points in the code - ) - # ClearML's hooks will already grab all general parameters - # Only the hyperparameters coming from the yaml config file - # will have to be added manually! - self.task.connect(hyp, name='Hyperparameters') - - # Get ClearML Dataset Version if requested - if opt.data.startswith('clearml://'): - # data_dict should have the following keys: - # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) - self.data_dict = construct_dataset(opt.data) - # Set data to data_dict because wandb will crash without this information and opt is the best way - # to give it to them - opt.data = self.data_dict - - def log_debug_samples(self, files, title='Debug Samples'): - """ - Log files (images) as debug samples in the ClearML task. 
- - arguments: - files (List(PosixPath)) a list of file paths in PosixPath format - title (str) A title that groups together images with the same values - """ - for f in files: - if f.exists(): - it = re.search(r'_batch(\d+)', f.name) - iteration = int(it.groups()[0]) if it else 0 - self.task.get_logger().report_image(title=title, - series=f.name.replace(it.group(), ''), - local_path=str(f), - iteration=iteration) - - def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): - """ - Draw the bounding boxes on a single image and report the result as a ClearML debug sample. - - arguments: - image_path (PosixPath) the path the original image file - boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - class_names (dict): dict containing mapping of class int to class name - image (Tensor): A torch tensor containing the actual image data - """ - if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: - # Log every bbox_interval times and deduplicate for any intermittend extra eval runs - if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: - im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) - annotator = Annotator(im=im, pil=True) - for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): - color = colors(i) - - class_name = class_names[int(class_nr)] - confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" - - if conf > conf_threshold: - annotator.rectangle(box.cpu().numpy(), outline=color) - annotator.box_label(box.cpu().numpy(), label=label, color=color) - - annotated_image = annotator.result() - self.task.get_logger().report_image(title='Bounding Boxes', - series=image_path.name, - iteration=self.current_epoch, - image=annotated_image) - 
self.current_epoch_logged_images.add(image_path) diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py deleted file mode 100644 index ee518b0..0000000 --- a/utils/loggers/clearml/hpo.py +++ /dev/null @@ -1,84 +0,0 @@ -from clearml import Task -# Connecting ClearML with the current process, -# from here on everything is logged automatically -from clearml.automation import HyperParameterOptimizer, UniformParameterRange -from clearml.automation.optuna import OptimizerOptuna - -task = Task.init(project_name='Hyper-Parameter Optimization', - task_name='YOLOv5', - task_type=Task.TaskTypes.optimizer, - reuse_last_task_id=False) - -# Example use case: -optimizer = HyperParameterOptimizer( - # This is the experiment we want to optimize - base_task_id='', - # here we define the hyper-parameters to optimize - # Notice: The parameter name should exactly match what you see in the UI: / - # For Example, here we see in the base experiment a section Named: "General" - # under it a parameter named "batch_size", this becomes "General/batch_size" - # If you have `argparse` for example, then arguments will appear under the "Args" section, - # and you should instead pass "Args/batch_size" - hyper_parameters=[ - UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), - UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), - UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), - UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), - UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), - UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), - UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), - UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), - 
UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), - UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), - UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), - UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), - UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), - UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), - UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], - # this is the objective metric we want to maximize/minimize - objective_metric_title='metrics', - objective_metric_series='mAP_0.5', - # now we decide if we want to maximize it or minimize it (accuracy we maximize) - objective_metric_sign='max', - # let us limit the number of concurrent experiments, - # this in turn will make sure we do dont bombard the scheduler with experiments. 
- # if we have an auto-scaler connected, this, by proxy, will limit the number of machine - max_number_of_concurrent_tasks=1, - # this is the optimizer class (actually doing the optimization) - # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) - optimizer_class=OptimizerOptuna, - # If specified only the top K performing Tasks will be kept, the others will be automatically archived - save_top_k_tasks_only=5, # 5, - compute_time_limit=None, - total_max_jobs=20, - min_iteration_per_job=None, - max_iteration_per_job=None, -) - -# report every 10 seconds, this is way too often, but we are testing here -optimizer.set_report_period(10 / 60) -# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent -# an_optimizer.start_locally(job_complete_callback=job_complete_callback) -# set the time limit for the optimization process (2 hours) -optimizer.set_time_limit(in_minutes=120.0) -# Start the optimization process in the local environment -optimizer.start_locally() -# wait until process is done (notice we are controlling the optimization process in the background) -optimizer.wait() -# make sure background optimization stopped -optimizer.stop() - -print('We are done, good bye') diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md deleted file mode 100644 index 3a51cb9..0000000 --- a/utils/loggers/comet/README.md +++ /dev/null @@ -1,256 +0,0 @@ - - -# YOLOv5 with Comet - -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) - -# About Comet - -Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. - -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
-Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! - -# Getting Started - -## Install Comet - -```shell -pip install comet_ml -``` - -## Configure Comet Credentials - -There are two ways to configure Comet with YOLOv5. - -You can either set your credentials through enviroment variables - -**Environment Variables** - -```shell -export COMET_API_KEY= -export COMET_PROJECT_NAME= # This will default to 'yolov5' -``` - -Or create a `.comet.config` file in your working directory and set your credentials there. - -**Comet Configuration File** - -``` -[comet] -api_key= -project_name= # This will default to 'yolov5' -``` - -## Run the Training Script - -```shell -# Train YOLOv5s on COCO128 for 5 epochs -python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt -``` - -That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI - -yolo-ui - -# Try out an Example! -Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) - -Or better yet, try it out yourself in this Colab Notebook - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) - -# Log automatically - -By default, Comet will log the following items - -## Metrics -- Box Loss, Object Loss, Classification Loss for the training and validation data -- mAP_0.5, mAP_0.5:0.95 metrics for the validation data. 
-- Precision and Recall for the validation data - -## Parameters - -- Model Hyperparameters -- All parameters passed through the command line options - -## Visualizations - -- Confusion Matrix of the model predictions on the validation data -- Plots for the PR and F1 curves across all classes -- Correlogram of the Class Labels - -# Configure Comet Logging - -Comet can be configured to log additional data either through command line flags passed to the training script -or through environment variables. - -```shell -export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online -export COMET_MODEL_NAME= #Set the name for the saved model. Defaults to yolov5 -export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true -export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100. -export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false -export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt' -export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false. -export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions -``` - -## Logging Checkpoints with Comet - -Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the -logged checkpoints to Comet based on the interval value provided by `save-period` - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---save-period 1 -``` - -## Logging Model Predictions - -By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet. 
- -You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch. - -**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. - -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) - - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---bbox_interval 2 -``` - -### Controlling the number of Prediction Images logged to Comet - -When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable. - -```shell -env COMET_MAX_IMAGE_UPLOADS=200 python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---bbox_interval 1 -``` - -### Logging Class Level Metrics - -Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class. 
- -```shell -env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt -``` - -## Uploading a Dataset to Comet Artifacts - -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. - -The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data coco128.yaml \ ---weights yolov5s.pt \ ---upload_dataset -``` - -You can find the uploaded dataset in the Artifacts tab in your Comet Workspace -artifact-1 - -You can preview the data directly in the Comet UI. -artifact-2 - -Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file -artifact-3 - -### Using a saved Artifact - -If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL. - -``` -# contents of artifact.yaml file -path: "comet:///:" -``` -Then pass this file to your training script in the following way - -```shell -python train.py \ ---img 640 \ ---batch 16 \ ---epochs 5 \ ---data artifact.yaml \ ---weights yolov5s.pt -``` - -Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset. -artifact-4 - -## Resuming a Training Run - -If your training run is interrupted for any reason, e.g. 
disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path. - -The Run Path has the following format `comet:////`. - -This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI - -```shell -python train.py \ ---resume "comet://" -``` - -## Hyperparameter Search with the Comet Optimizer - -YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI. - -### Configuring an Optimizer Sweep - -To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json` - -```shell -python utils/loggers/comet/hpo.py \ - --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" -``` - -The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after -the script. - -```shell -python utils/loggers/comet/hpo.py \ - --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \ - --save-period 1 \ - --bbox_interval 1 -``` - -### Running a Sweep in Parallel - -```shell -comet optimizer -j utils/loggers/comet/hpo.py \ - utils/loggers/comet/optimizer_config.json" -``` - -### Visualizing Results - -Comet provides a number of ways to visualize the results of your sweep. 
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) - -hyperparameter-yolo diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py deleted file mode 100644 index b0318f8..0000000 --- a/utils/loggers/comet/__init__.py +++ /dev/null @@ -1,508 +0,0 @@ -import glob -import json -import logging -import os -import sys -from pathlib import Path - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -try: - import comet_ml - - # Project Configuration - config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") -except (ModuleNotFoundError, ImportError): - comet_ml = None - COMET_PROJECT_NAME = None - -import PIL -import torch -import torchvision.transforms as T -import yaml - -from utils.dataloaders import img2label_paths -from utils.general import check_dataset, scale_boxes, xywh2xyxy -from utils.metrics import box_iou - -COMET_PREFIX = "comet://" - -COMET_MODE = os.getenv("COMET_MODE", "online") - -# Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") - -# Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" - -# Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) - -# Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) - -# Batch Logging Settings 
-COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" - -RANK = int(os.getenv("RANK", -1)) - -to_pil = T.ToPILImage() - - -class CometLogger: - """Log metrics, parameters, source code, models and much more - with Comet - """ - - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: - self.job_type = job_type - self.opt = opt - self.hyp = hyp - - # Comet Flags - self.comet_mode = COMET_MODE - - self.save_model = opt.save_period > -1 - self.model_name = COMET_MODEL_NAME - - # Batch Logging Settings - self.log_batch_metrics = COMET_LOG_BATCH_METRICS - self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL - - # Dataset Artifact Settings - self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET - self.resume = self.opt.resume - - # Default parameters to pass to Experiment objects - self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} - self.default_experiment_kwargs.update(experiment_kwargs) - self.experiment = self._get_experiment(self.comet_mode, run_id) - - self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] - - self.logged_images_count = 0 - self.max_images = COMET_MAX_IMAGE_UPLOADS - - if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") - if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] - self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", - ) - 
self.log_parameters(vars(opt)) - self.log_parameters(self.opt.hyp) - self.log_asset_data( - self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, - ) - self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, - ) - - self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - - if hasattr(self.opt, "conf_thres"): - self.conf_thres = self.opt.conf_thres - else: - self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): - self.iou_thres = self.opt.iou_thres - else: - self.iou_thres = IOU_THRES - - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) - - self.comet_log_predictions = COMET_LOG_PREDICTIONS - if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 - else: - self.comet_log_prediction_interval = self.opt.bbox_interval - - if self.comet_log_predictions: - self.metadata_dict = {} - self.logged_image_names = [] - - self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS - - self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) - - # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) - - def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": - if experiment_id is not None: - return 
comet_ml.ExistingOfflineExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) - - else: - try: - if experiment_id is not None: - return comet_ml.ExistingExperiment( - previous_experiment=experiment_id, - **self.default_experiment_kwargs, - ) - - return comet_ml.Experiment(**self.default_experiment_kwargs) - - except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) - - return - - def log_metrics(self, log_dict, **kwargs): - self.experiment.log_metrics(log_dict, **kwargs) - - def log_parameters(self, log_dict, **kwargs): - self.experiment.log_parameters(log_dict, **kwargs) - - def log_asset(self, asset_path, **kwargs): - self.experiment.log_asset(asset_path, **kwargs) - - def log_asset_data(self, asset, **kwargs): - self.experiment.log_asset_data(asset, **kwargs) - - def log_image(self, img, **kwargs): - self.experiment.log_image(img, **kwargs) - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - if not self.save_model: - return - - model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} - - model_files = glob.glob(f"{path}/*.pt") - for model_path in model_files: - name = Path(model_path).name - - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - metadata=model_metadata, - overwrite=True, - ) - - def check_dataset(self, data_file): - with open(data_file) as f: - data_config = yaml.safe_load(f) - - if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") - data_dict = self.download_dataset_artifact(path) - - return data_dict - - 
self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) - - return check_dataset(data_file) - - def log_predictions(self, image, labelsn, path, shape, predn): - if self.logged_images_count >= self.max_images: - return - detections = predn[predn[:, 4] > self.conf_thres] - iou = box_iou(labelsn[:, 1:], detections[:, :4]) - mask, _ = torch.where(iou > self.iou_thres) - if len(mask) == 0: - return - - filtered_detections = detections[mask] - filtered_labels = labelsn[mask] - - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - if image_name not in self.logged_image_names: - native_scale_image = PIL.Image.open(path) - self.log_image(native_scale_image, name=image_name) - self.logged_image_names.append(image_name) - - metadata = [] - for cls, *xyxy in filtered_labels.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - for *xyxy, conf, cls in filtered_detections.tolist(): - metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) - - self.metadata_dict[image_name] = metadata - self.logged_images_count += 1 - - return - - def preprocess_prediction(self, image, labels, shape, pred): - nl, _ = labels.shape[0], pred.shape[0] - - # Predictions - if self.opt.single_cls: - pred[:, 5] = 0 - - predn = pred.clone() - scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) - - labelsn = None - if nl: - tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels - labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred - - return predn, labelsn - - def add_assets_to_artifact(self, artifact, path, asset_path, 
split): - img_paths = sorted(glob.glob(f"{asset_path}/*")) - label_paths = img2label_paths(img_paths) - - for image_file, label_file in zip(img_paths, label_paths): - image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) - - try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) - except ValueError as e: - logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") - continue - - return artifact - - def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) - - metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: - split_path = metadata.get(key) - if split_path is not None: - metadata[key] = split_path.replace(path, "") - - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) - for key in metadata.keys(): - if key in ["train", "val", "test"]: - if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): - continue - - asset_path = self.data_dict.get(key) - if asset_path is not None: - artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) - - self.experiment.log_artifact(artifact) - - return - - def download_dataset_artifact(self, artifact_path): - logged_artifact = self.experiment.get_artifact(artifact_path) - artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) - logged_artifact.download(artifact_save_dir) - - metadata = logged_artifact.metadata - data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir - - metadata_names = metadata.get("names") - if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} - elif type(metadata_names) == list: - data_dict["names"] = 
{int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} - else: - raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary" - - data_dict = self.update_data_paths(data_dict) - return data_dict - - def update_data_paths(self, data_dict): - path = data_dict.get("path", "") - - for split in ["train", "val", "test"]: - if data_dict.get(split): - split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) - - return data_dict - - def on_pretrain_routine_end(self, paths): - if self.opt.resume: - return - - for path in paths: - self.log_asset(str(path)) - - if self.upload_dataset: - if not self.resume: - self.upload_dataset_artifact() - - return - - def on_train_start(self): - self.log_parameters(self.hyp) - - def on_train_epoch_start(self): - return - - def on_train_epoch_end(self, epoch): - self.experiment.curr_epoch = epoch - - return - - def on_train_batch_start(self): - return - - def on_train_batch_end(self, log_dict, step): - self.experiment.curr_step = step - if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): - self.log_metrics(log_dict, step=step) - - return - - def on_train_end(self, files, save_dir, last, best, epoch, results): - if self.comet_log_predictions: - curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) - - for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) - - if not self.opt.evolve: - model_path = str(best if best.exists() else last) - name = Path(model_path).name - if self.save_model: - self.experiment.log_model( - self.model_name, - file_or_folder=model_path, - file_name=name, - overwrite=True, - ) - - # Check if running Experiment with Comet Optimizer - if hasattr(self.opt, 'comet_optimizer_id'): - metric = 
results.get(self.opt.comet_optimizer_metric) - self.experiment.log_other('optimizer_metric_value', metric) - - self.finish_run() - - def on_val_start(self): - return - - def on_val_batch_start(self): - return - - def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): - if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): - return - - for si, pred in enumerate(outputs): - if len(pred) == 0: - continue - - image = images[si] - labels = targets[targets[:, 0] == si, 1:] - shape = shapes[si] - path = paths[si] - predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) - if labelsn is not None: - self.log_predictions(image, labelsn, path, shape, predn) - - return - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - if self.comet_log_per_class_metrics: - if self.num_classes > 1: - for i, c in enumerate(ap_class): - class_name = self.class_names[c] - self.experiment.log_metrics( - { - 'mAP@.5': ap50[i], - 'mAP@.5:.95': ap[i], - 'precision': p[i], - 'recall': r[i], - 'f1': f1[i], - 'true_positives': tp[i], - 'false_positives': fp[i], - 'support': nt[c]}, - prefix=class_name) - - if self.comet_log_confusion_matrix: - epoch = self.experiment.curr_epoch - class_names = list(self.class_names.values()) - class_names.append("background") - num_classes = len(class_names) - - self.experiment.log_confusion_matrix( - matrix=confusion_matrix.matrix, - max_categories=num_classes, - labels=class_names, - epoch=epoch, - column_label='Actual Category', - row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", - ) - - def on_fit_epoch_end(self, result, epoch): - self.log_metrics(result, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: - self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == 
fi) - - def on_params_update(self, params): - self.log_parameters(params) - - def finish_run(self): - self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py deleted file mode 100644 index 3cbd451..0000000 --- a/utils/loggers/comet/comet_utils.py +++ /dev/null @@ -1,150 +0,0 @@ -import logging -import os -from urllib.parse import urlparse - -try: - import comet_ml -except (ModuleNotFoundError, ImportError): - comet_ml = None - -import yaml - -logger = logging.getLogger(__name__) - -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") - - -def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" - os.makedirs(model_dir, exist_ok=True) - - model_name = COMET_MODEL_NAME - model_asset_list = experiment.get_model_asset_list(model_name) - - if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") - return - - model_asset_list = sorted( - model_asset_list, - key=lambda x: x["step"], - reverse=True, - ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} - - resource_url = urlparse(opt.weights) - checkpoint_filename = resource_url.query - - if checkpoint_filename: - asset_id = logged_checkpoint_map.get(checkpoint_filename) - else: - asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) - checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME - - if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") - return - - try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") - asset_filename = checkpoint_filename - - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with 
open(model_download_path, "wb") as f: - f.write(model_binary) - - opt.weights = model_download_path - - except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") - logger.exception(e) - - -def set_opt_parameters(opt, experiment): - """Update the opts Namespace with parameters - from Comet's ExistingExperiment when resuming a run - - Args: - opt (argparse.Namespace): Namespace of command line options - experiment (comet_ml.APIExperiment): Comet API Experiment object - """ - asset_list = experiment.get_asset_list() - resume_string = opt.resume - - for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - opt_dict = yaml.safe_load(asset_binary) - for key, value in opt_dict.items(): - setattr(opt, key, value) - opt.resume = resume_string - - # Save hyperparameters to YAML file - # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" - os.makedirs(save_dir, exist_ok=True) - - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: - yaml.dump(opt.hyp, f) - opt.hyp = hyp_yaml_path - - -def check_comet_weights(opt): - """Downloads model weights from Comet and updates the - weights path to point to saved weights location - - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if weights are successfully downloaded - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.weights, str): - if opt.weights.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - download_model_checkpoint(opt, experiment) - return True - - return None - - -def check_comet_resume(opt): - """Restores run parameters to its original state based on the model 
checkpoint - and logged Experiment parameters. - - Args: - opt (argparse.Namespace): Command Line arguments passed - to YOLOv5 training script - - Returns: - None/bool: Return True if the run is restored successfully - else return None - """ - if comet_ml is None: - return - - if isinstance(opt.resume, str): - if opt.resume.startswith(COMET_PREFIX): - api = comet_ml.API() - resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" - experiment = api.get(experiment_path) - set_opt_parameters(opt, experiment) - download_model_checkpoint(opt, experiment) - - return True - - return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py deleted file mode 100644 index 7dd5c92..0000000 --- a/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training 
epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - 
parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", - type=int, - default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = comet_ml.Optimizer(optimizer_config) - else: - optimizer = comet_ml.Optimizer(optimizer_id) - - opt.comet_optimizer_id = optimizer.id - status = optimizer.status() - - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] - - logger.info("COMET INFO: Starting Hyperparameter Sweep") - for parameter in optimizer.get_parameters(): - run(parameter["parameters"], 
opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json deleted file mode 100644 index 83dddda..0000000 --- a/utils/loggers/comet/optimizer_config.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "algorithm": "random", - "parameters": { - "anchor_t": { - "type": "discrete", - "values": [ - 2, - 8 - ] - }, - "batch_size": { - "type": "discrete", - "values": [ - 16, - 32, - 64 - ] - }, - "box": { - "type": "discrete", - "values": [ - 0.02, - 0.2 - ] - }, - "cls": { - "type": "discrete", - "values": [ - 0.2 - ] - }, - "cls_pw": { - "type": "discrete", - "values": [ - 0.5 - ] - }, - "copy_paste": { - "type": "discrete", - "values": [ - 1 - ] - }, - "degrees": { - "type": "discrete", - "values": [ - 0, - 45 - ] - }, - "epochs": { - "type": "discrete", - "values": [ - 5 - ] - }, - "fl_gamma": { - "type": "discrete", - "values": [ - 0 - ] - }, - "fliplr": { - "type": "discrete", - "values": [ - 0 - ] - }, - "flipud": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_h": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_s": { - "type": "discrete", - "values": [ - 0 - ] - }, - "hsv_v": { - "type": "discrete", - "values": [ - 0 - ] - }, - "iou_t": { - "type": "discrete", - "values": [ - 0.7 - ] - }, - "lr0": { - "type": "discrete", - "values": [ - 1e-05, - 0.1 - ] - }, - "lrf": { - "type": "discrete", - "values": [ - 0.01, - 1 - ] - }, - "mixup": { - "type": "discrete", - "values": [ - 1 - ] - }, - "momentum": { - "type": "discrete", - "values": [ - 0.6 - ] - }, - "mosaic": { - "type": "discrete", - "values": [ - 0 - ] - }, - "obj": { - "type": "discrete", - "values": [ - 0.2 - ] - }, - "obj_pw": { - "type": "discrete", - "values": [ - 0.5 - ] - }, - "optimizer": { - "type": "categorical", - "values": [ - "SGD", - "Adam", - "AdamW" - ] - }, - "perspective": { - "type": "discrete", - "values": [ - 0 - ] - }, - "scale": { - "type": "discrete", - "values": [ - 0 - ] - }, - "shear": { - "type": "discrete", - "values": [ - 
0 - ] - }, - "translate": { - "type": "discrete", - "values": [ - 0 - ] - }, - "warmup_bias_lr": { - "type": "discrete", - "values": [ - 0, - 0.2 - ] - }, - "warmup_epochs": { - "type": "discrete", - "values": [ - 5 - ] - }, - "warmup_momentum": { - "type": "discrete", - "values": [ - 0, - 0.95 - ] - }, - "weight_decay": { - "type": "discrete", - "values": [ - 0, - 0.001 - ] - } - }, - "spec": { - "maxCombo": 0, - "metric": "metrics/mAP_0.5", - "objective": "maximize" - }, - "trials": 1 -} diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md deleted file mode 100644 index d78324b..0000000 --- a/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - -Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. 
How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime . All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/0, CPU utilization, RAM memory usage -- Your trained model as W&B Artifact -- Environment: OS and Python types, Git repository and state, **training command** - -

Weights & Biases dashboard

-
- -## Disabling wandb - -- training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. - -
-

1: Train and Log Evaluation simultaneousy

- This is an extension of the previous section, but it'll also training after uploading the dataset. This also evaluation Table - Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage - Code $ python train.py --upload_data val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
- -

2. Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. -
- Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
- -

3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs evaluation -
- Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
- -

4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval. - You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged - -
- Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
- -
- -

5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with wandb-artifact:// prefix followed by the run path, i.e, wandb-artifact://username/project/runid . This doesn't require the model checkpoint to be present on the local system. - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -

6: Resume runs from dataset artifact & checkpoint artifacts.

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device - The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot --upload_dataset or - train from _wandb.yaml file and set --save_period - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- - - -

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb..0000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f..0000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. 
Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea..0000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - 
distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py deleted file mode 100644 index 238f4ed..0000000 --- a/utils/loggers/wandb/wandb_utils.py +++ /dev/null @@ -1,589 +0,0 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" - -import logging -import os -import sys -from contextlib import contextmanager -from pathlib import Path -from typing import Dict - -import yaml -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - wandb = None - -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if 
is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - - -class 
WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. - - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. - - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. - - For more on how this logger is used, see the Weights & Biases documentation: - https://docs.wandb.com/guides/integrations/yolov5 - """ - - def __init__(self, opt, run_id=None, job_type='Training'): - """ - - Initialize WandbLogger instance - - Upload dataset if opt.upload_dataset is True - - Setup training processes if job_type is 'Training' - - arguments: - opt (namespace) -- Commandline arguments for this run - run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run - - """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - - # Pre-training routine -- - self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run - self.val_artifact, self.train_artifact = None, None - self.train_artifact_path, self.val_artifact_path = None, None - self.result_artifact = None - self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None - self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None - self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert 
wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: - self.wandb_run = wandb.init(config=opt, - resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=opt.name if opt.name != 'exp' else None, - job_type=job_type, - id=run_id, - allow_val_change=True) if not wandb.run else wandb.run - if self.wandb_run: - if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - - if isinstance(opt.data, dict): - # This means another dataset manager has already processed the dataset info (e.g. ClearML) - # and they will have stored the already processed dict in opt.data - self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. 
- self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) - self.setup_training(opt) - - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. - """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - - def setup_training(self, opt): - """ - Setup the necessary processes for training YOLO models: - - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval - - arguments: - opt (namespace) -- commandline arguments for this run - - """ - self.log_dict, self.current_epoch = {}, 0 - self.bbox_interval = opt.bbox_interval - if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" - config = self.wandb_run.config - opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ - config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, 
self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() - if opt.bbox_interval == -1: - self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 - if opt.evolve or opt.noplots: - self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = 
wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - """ - Log the model checkpoint as W&B artifact - - arguments: - path (Path) -- Path of directory containing the checkpoints - opt (namespace) -- Command line arguments for this run - epoch (int) -- Current epoch number - fitness_score (float) -- fitness score for current epoch - best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
- """ - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', - type='model', - metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score}) - model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - wandb.log_artifact(model_artifact, - aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. 
it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
- self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. - - arguments: - dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id -- hash map that maps class ids to labels - name -- name of the artifact - - returns: - dataset artifact to be logged or used - """ - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.im_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), name='data/labels/' + - label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 
'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({ - "position": { - "middle": [xywh[0], xywh[1]], - "width": xywh[2], - "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - """ - Build evaluation Table. Uses reference from validation dataset table. - - arguments: - predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - names (dict(int, str)): hash map that maps class ids to labels - """ - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - avg_conf_per_class = [0] * len(self.data_dict['names']) - pred_class_count = {} - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - cls = int(cls) - box_data.append({ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"}) - avg_conf_per_class[cls] += conf - - if cls in pred_class_count: - pred_class_count[cls] += 1 - else: - pred_class_count[cls] = 1 - - for pred_class in pred_class_count.keys(): - avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] - - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] 
- self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class) - - def val_one_image(self, pred, predn, path, names, im): - """ - Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) - - def log(self, log_dict): - """ - save the metrics to the logging dictionary - - arguments: - log_dict (Dict) -- metrics/media to be logged in current step - """ - if self.wandb_run: - for key, value in log_dict.items(): - self.log_dict[key] = value - - def end_epoch(self, best_result=False): - """ - commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
- - arguments: - best_result (boolean): Boolean representing if the result of this evaluation is best or not - """ - if self.wandb_run: - with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images - try: - wandb.log(self.log_dict) - except BaseException as e: - LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" - ) - self.wandb_run.finish() - self.wandb_run = None - - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - - def finish_run(self): - """ - Log metrics if any and finish the current W&B run - """ - if self.wandb_run: - if self.log_dict: - with all_logging_disabled(): - wandb.log(self.log_dict) - wandb.run.finish() - - -@contextmanager -def all_logging_disabled(highest_level=logging.CRITICAL): - """ source - https://gist.github.com/simon-weber/7853144 - A context manager that will prevent any logging messages triggered during the body from being processed. - :param highest_level: the maximum logging level in use. - This would only need to be changed if a custom level greater than CRITICAL is defined. 
- """ - previous_level = logging.root.manager.disable - logging.disable(highest_level) - try: - yield - finally: - logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py deleted file mode 100644 index 9b9c3d9..0000000 --- a/utils/loss.py +++ /dev/null @@ -1,234 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Loss functions -""" - -import torch -import torch.nn as nn - -from utils.metrics import bbox_iou -from utils.torch_utils import de_parallel - - -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets - return 1.0 - 0.5 * eps, 0.5 * eps - - -class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. - def __init__(self, alpha=0.05): - super().__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() - self.alpha = alpha - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - pred = torch.sigmoid(pred) # prob from logits - dx = pred - true # reduce only missing label effects - # dx = (pred - true).abs() # reduce missing label and false label effects - alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) - loss *= alpha_factor - return loss.mean() - - -class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - # p_t = torch.exp(-loss) - # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability - - # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py - pred_prob = torch.sigmoid(pred) # prob from logits - p_t = true * pred_prob + (1 - true) * (1 - pred_prob) - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = (1.0 - p_t) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super().__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - - pred_prob = torch.sigmoid(pred) # prob from logits - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = torch.abs(true - pred_prob) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class ComputeLoss: - sort_obj_iou = False - - # Compute losses - def __init__(self, model, autobalance=False): - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - m = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance - self.na = m.na # number of anchors - self.nc = m.nc # number of classes - self.nl = m.nl # number of layers - self.anchors = m.anchors - self.device = device - - def __call__(self, p, 
targets): # predictions, targets - lcls = torch.zeros(1, device=self.device) # class loss - lbox = torch.zeros(1, device=self.device) # box loss - lobj = torch.zeros(1, device=self.device) # object loss - tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - - n = b.shape[0] # number of targets - if n: - # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 - pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions - - # Regression - pxy = pxy.sigmoid() * 2 - 0.5 - pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - iou = iou.detach().clamp(0).type(tobj.dtype) - if self.sort_obj_iou: - j = iou.argsort() - b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] - if self.gr < 1: - iou = (1.0 - self.gr) + self.gr * iou - tobj[b, a, gj, gi] = iou # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(pcls, self.cn, device=self.device) # targets - t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(pcls, t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= 
self.hyp['cls'] - bs = tobj.shape[0] # batch size - - return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=self.device) # normalized to gridspace gain - ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets - - for i in range(self.nl): - anchors, shape = self.anchors[i], p[i].shape - gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain # shape(3,n,7) - if nt: - # Matches - r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1 < g) & (gxy > 1)).T - l, m = ((gxi % 1 < g) & (gxi > 1)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors - a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class - gij = (gxy - offsets).long() - gi, gj = gij.T # grid indices - - # Append - indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid - 
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - - return tcls, tbox, indices, anch diff --git a/utils/metrics.py b/utils/metrics.py deleted file mode 100644 index ed611d7..0000000 --- a/utils/metrics.py +++ /dev/null @@ -1,368 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Model validation metrics -""" - -import math -import warnings -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import torch - -from utils import TryExcept, threaded - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def smooth(y, f=0.05): - # Box filter of fraction f - nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) - p = np.ones(nf // 2) # ones padding - yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded - return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed - - -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - save_dir: Plot save directory - # Returns - The average precision as computed in py-faster-rcnn. 
- """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes, nt = np.unique(target_cls, return_counts=True) - nc = unique_classes.shape[0] # number of classes, number of detections - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = nt[ci] # number of labels - n_p = i.sum() # number of predictions - if n_p == 0 or n_l == 0: - continue - - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + eps) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) - if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - - # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + eps) - names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data - names = dict(enumerate(names)) # to dict - if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') - - i = smooth(f1.mean(0), 0.1).argmax() # max F1 index - p, r, f1 = p[:, i], r[:, i], f1[:, i] - tp = (r * nt).round() # true positives - fp = (tp / (p + eps) - 
tp).round() # false positives - return tp, fp, p, r, f1, ap, unique_classes.astype(int) - - -def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves - # Arguments - recall: The recall curve (list) - precision: The precision curve (list) - # Returns - Average precision, precision curve, recall curve - """ - - # Append sentinel values to beginning and end - mrec = np.concatenate(([0.0], recall, [1.0])) - mpre = np.concatenate(([1.0], precision, [0.0])) - - # Compute the precision envelope - mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec - - -class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix - def __init__(self, nc, conf=0.25, iou_thres=0.45): - self.matrix = np.zeros((nc + 1, nc + 1)) - self.nc = nc # number of classes - self.conf = conf - self.iou_thres = iou_thres - - def process_batch(self, detections, labels): - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - None, updates confusion matrix accordingly - """ - if detections is None: - gt_classes = labels.int() - for gc in gt_classes: - self.matrix[self.nc, gc] += 1 # background FN - return - - detections = detections[detections[:, 4] > self.conf] - gt_classes = labels[:, 0].int() - detection_classes = detections[:, 5].int() - iou = box_iou(labels[:, 1:], detections[:, :4]) - - x = torch.where(iou > self.iou_thres) - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - else: - matches = np.zeros((0, 3)) - - n = matches.shape[0] > 0 - m0, m1, _ = matches.transpose().astype(int) - for i, gc in enumerate(gt_classes): - j = m0 == i - if n and sum(j) == 1: - self.matrix[detection_classes[m1[j]], gc] += 1 # correct - else: - self.matrix[self.nc, gc] += 1 # true background - - if n: - for i, dc in enumerate(detection_classes): - if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # predicted background - - def matrix(self): - return self.matrix - - def tp_fp(self): - tp = self.matrix.diagonal() # true positives - fp = self.matrix.sum(1) - tp # false positives - # fn = self.matrix.sum(0) - tp # false negatives (missed detections) - return tp[:-1], fp[:-1] # remove background class - - @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') - def plot(self, normalize=True, save_dir='', names=()): - import seaborn as sn - - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) - - fig, ax = plt.subplots(1, 1, figsize=(12, 
9), tight_layout=True) - nc, nn = self.nc, len(names) # number of classes, names - sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size - labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, - ax=ax, - annot=nc < 30, - annot_kws={ - "size": 8}, - cmap='Blues', - fmt='.2f', - square=True, - vmin=0.0, - xticklabels=ticklabels, - yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_ylabel('True') - ax.set_ylabel('Predicted') - ax.set_title('Confusion Matrix') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) - plt.close(fig) - - def print(self): - for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) - - -def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) - - # Get the coordinates of bounding boxes - if xywh: # transform from xywh to xyxy - (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) - w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 - b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ - b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ - else: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) - b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - union = w1 * h1 + w2 * h2 - inter + eps - - # IoU - iou = inter / union - if CIoU or DIoU or GIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, 
b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 - if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - return iou - rho2 / c2 # DIoU - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf - return iou # IoU - - -def box_area(box): - # box = xyxy(4,n) - return (box[2] - box[0]) * (box[3] - box[1]) - - -def box_iou(box1, box2, eps=1e-7): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) - inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) - - # IoU = inter / (area1 + area2 - inter) - return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) - - -def bbox_ioa(box1, box2, eps=1e-7): - """ Returns the intersection over box2 area given box1, box2. 
Boxes are x1y1x2y2 - box1: np.array of shape(4) - box2: np.array of shape(nx4) - returns: np.array of shape(n) - """ - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1 - b2_x1, b2_y1, b2_x2, b2_y2 = box2.T - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps - - # Intersection over box2 area - return inter_area / box2_area - - -def wh_iou(wh1, wh2, eps=1e-7): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) - - -# Plots ---------------------------------------------------------------------------------------------------------------- - - -@threaded -def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): - # Precision-recall curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - py = np.stack(py, axis=1) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) - else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) - - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - ax.set_title('Precision-Recall Curve') - fig.savefig(save_dir, dpi=250) - plt.close(fig) - - -@threaded -def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): - # Metric-confidence curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), 
tight_layout=True) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) - else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) - - y = smooth(py.mean(0), 0.05) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - ax.set_title(f'{ylabel}-Confidence Curve') - fig.savefig(save_dir, dpi=250) - plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py deleted file mode 100644 index 36df271..0000000 --- a/utils/plots.py +++ /dev/null @@ -1,575 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" - -import contextlib -import math -import os -from copy import copy -from pathlib import Path -from urllib.error import URLError - -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sn -import torch -from PIL import Image, ImageDraw, ImageFont - -from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, - is_ascii, xywh2xyxy, xyxy2xywh) -from utils.metrics import fitness -from utils.segment.general import scale_image - -# Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -class Colors: - # Ultralytics color palette https://ultralytics.com/ - def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') 
for c in hexs] - self.n = len(self.palette) - - def __call__(self, i, bgr=False): - c = self.palette[int(i) % self.n] - return (c[2], c[1], c[0]) if bgr else c - - @staticmethod - def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - -colors = Colors() # create instance for 'from utils.plots import colors' - - -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - w, h = self.font.getsize(label) # text width, height - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h >= 3 - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def masks(self, masks, colors, im_gpu=None, alpha=0.5): - """Plot masks at once. 
- Args: - masks (tensor): predicted masks on cuda, shape: [n, h, w] - colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] - im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] - alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque - """ - if self.pil: - # convert to numpy first - self.im = np.asarray(self.im).copy() - if im_gpu is None: - # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) - if len(masks) == 0: - return - if isinstance(masks, torch.Tensor): - masks = torch.as_tensor(masks, dtype=torch.uint8) - masks = masks.permute(1, 2, 0).contiguous() - masks = masks.cpu().numpy() - # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) - masks = scale_image(masks.shape[:2], masks, self.im.shape) - masks = np.asarray(masks, dtype=np.float32) - colors = np.asarray(colors, dtype=np.float32) # shape(n,3) - s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together - masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) - self.im[:] = masks * alpha + self.im * (1 - s * alpha) - else: - if len(masks) == 0: - self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 - colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 - colors = colors[:, None, None] # shape(n,1,1,3) - masks = masks.unsqueeze(3) # shape(n,h,w,1) - masks_color = masks * (colors * alpha) # shape(n,h,w,3) - - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) - mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) - - im_gpu = im_gpu.flip(dims=[0]) # flip channel - im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255).byte().cpu().numpy() - self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) - if self.pil: - # convert im back to PIL and update draw - self.fromarray(self.im) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # 
Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): - # Add text to image (PIL-only) - if anchor == 'bottom': # start y from font bottom - w, h = self.font.getsize(text) # text width, height - xy[1] += 1 - h - self.draw.text(xy, text, fill=txt_color, font=self.font) - - def fromarray(self, im): - # Update self.im from a numpy array - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - - LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def output_to_target(output, max_det=300): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting - targets = [] - for i, o in enumerate(output): - box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) - j = torch.full((conf.shape[0], 1), i) - targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) - return torch.cat(targets, 0).numpy() - - -@threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - - max_size = 1920 # max image size - max_subplots = 16 # max image subplots, i.e. 
4x4 - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - ti = targets[targets[:, 0] == i] # image targets - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - 
annotator.box_label(box, label, color=color) - annotator.im.save(fname) # save - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) - save_dir = Path(file).parent if file else Path(dir) - plot2 = False # plot additional results - if plot2: - ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 
4), tight_layout=True) - # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') - plt.savefig(f, dpi=300) - - -@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 -def plot_labels(labels, names=(), save_dir=Path('')): - # plot dataset labels - LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - with contextlib.suppress(Exception): # color histogram bars by class - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - -def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): - # Show classification image grid with labels (optional) and predictions (optional) - from utils.augmentations import denormalize - - names = names or [f'class{i}' for i in range(1000)] - blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), - dim=0) # 
select batch index 0, block by channels - n = min(len(blocks), nmax) # number of plots - m = min(8, round(n ** 0.5)) # 8 x 8 default - fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols - ax = ax.ravel() if m > 1 else [ax] - # plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) - ax[i].axis('off') - if labels is not None: - s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') - ax[i].set_title(s, fontsize=8, verticalalignment='top') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - if verbose: - LOGGER.info(f"Saving {f}") - if labels is not None: - LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) - if pred is not None: - LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) - return f - - -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results - evolve_csv = Path(evolve_csv) - data = pd.read_csv(evolve_csv) - keys = [x.strip() for x in data.columns] - x = data.values - f = fitness(x) - j = np.argmax(f) # max fitness index - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') - for i, k in enumerate(keys[7:]): - v = x[:, 7 + i] - mu = v[j] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename - plt.savefig(f, dpi=200) - plt.close() - print(f'Saved {f}') - - -def plot_results(file='path/to/results.csv', dir=''): - # Plot training results.csv. 
Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' - for f in files: - try: - data = pd.read_csv(f) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j].astype('float') - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) - ax[i].set_title(s[j], fontsize=12) - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') - ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) - plt.close() - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_boxes(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) - # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue - Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB - return crop diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/utils/segment/__pycache__/__init__.cpython-38.pyc b/utils/segment/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index e4300726de064d4bbbbb1a6f9a68b5c34623e40d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 173 zcmWIL<>g`k0$U;XWDxxrL?8o3AjbiSi&=m~3PUi1CZpd8?fsY2+QSWCP#1)g9aM2(f?-)v801?IH1*&%11``0_PPmGw1UeT~S}7-x~S zR^rUpeEm6Fvwg!i@h$sj{E}}!=WC8X>z928xr%?*pYo@XoAS^3r~EgNoA%H9U-4&< zJLU67tU7mqQCYpI(P#Wr@U`$R<4r$BlCur=R3B&qePCpKsO8+p?}au=TlxhxpOOC5 z7?=ZVP#V}7-_UadnnrGJn7;9`PSS-2$q&j(LO&@%{VEA3XL&g{H%+9aO*YGNC$~0D z^|h%y%di5|o8yc*E}r>ik@V)>o(TM~nFYQZdFf{AuJ5{gfk?u*?I!)q4YmTYn{}ud zCz%_@Zq^BoD6Ow2JBzBn$3ZhoM7W2Z(8qEofy$Mv7I+Rk;U?c&|3;aihjNZx$O#93(l;}=Vv zBnp<=Jul6??$Tn@Yj%PqHPlk4zrNJ(X2R?4W??gJ>?Yl0>#YVx$pW#|&%$oHlm_i6 zh_j`35C;%k?Cr{O6BZ_^%@RgP)yZWL&v8o=O9_X(|`;fK+JLO=t^P&M1nR2Ps@ zt4tvY%TI)#0X%M)x^X{RNADB(j0cOOR4EfQg0beC|Cx${!Obx4VcanC+ChT=yWn0S z#8wNCiy-TZ_@+Asu^Q^&pLpuhUJtur9C%{or6!Fi@dG*Sb;EYtXeI*2O9cV&i^43Q zzg@KjSw)(yZsKLq*b0MfSxUopl!U(IE3yI%2cnk%(}A+8)Z1C3YD-IPVJdCfk+c!U z(kfP~Iux_is}^CDCN!=+Hf3x8PFzLc5-KE`;G#1 
zW$$l?fL;$kD#8CfqQx96TUU5JAg6dh{#mGmV6q}EQu(V$s^tQ7U!#hzQ{o~4iFOk} zp14SSgOYhlzKNtdCB8-ZZ&UIeO4KQ^Cooj-72lIy0V z>;-m-N`OiZ=u!!gf}cOE$&Fby$9zM{5qB!B!`hZMFk_n-6>u{%ur1~GxynI)pKFX_ z2$YLaRr_2aQH*2KJg5eeICL2zk_~;I`^F{{f5^bolbYYK-)j5(BaUNSJI}}4GN#L zg2kkCyjTf<lg2Q_WMu%{-?d$O*)p7hp3v~WxVOn zkYw=gh#(QKj@U>W0?Q*lK)g9Ld~L+x&-H?QflxlFyTHx^^>Yk}rs!2W4}8PU#@=Vj z&Sa76ut;46anW@*3cUEJJ3b6Oll2WmkA*`fwHnkO6l%2-YVFKDDql~s;3hnM(R<;K zx7{7L$aRjpV}%koO!~kX=JlWei6StCk$Tk>6f8+A^0KJkl{_PkLRcd8v>&}Ri`edZ z@u^8Y=%sQ>Ip7p42$NWv{TQFuO{aWt0|T$qjuUY(`y4_sL7RWVG!&L8)7(Oa%reV{ zxa^Ghkdrx-VN@B$P~Y>#QU5Yy{O-SDjIj;>XU4dK0jg@E#ZjdQs9x0sZG!j-C3h)# zjl;{zoV2S6TF1LO3*r_(M_HD-o@fVYcI>`JsHaF*ZSN<@wAnxd&KaUv72o2k(0`Xb z+Z*VaF*I|e8x~%KB*dk_J5I!=CRQ_hST63Axp5Z^3)2{J4XqYhpQ9Dw1Esia;@&xt z8CFIJVPMQD3MIgzI6K4X+`za9B}SaH4=?5#PEw@~Gyz;cPym%yGl`pC)}+muJmhr= zO^@&@57q`=o}DJj1u_U5+OM?_4Rod{XDx^Xt6Y{qSH!mnLAAwelJC5}x6Z9@h^Wq6QI=a<4B<#QwMlS-2?S3Mby8hN)H< z`^!n3C4G^~>D7DhuQu*|@czBk`knjo%=~V|DM$1o~#sm zAH)celjly&(3%9}X4m6xX#ZXzwCd+{{w3iiRR=IleA=>iCxC zkW2CJ2V)w=9_J8|(0i57a6$A@0Qdi4#pNbwkX@q`j$v$Zqo9=RCD zi>-dFXv*tiS+R5u%SHU9A~6c*Rm7t_h@vQUqSWX|jG~sIt_t=oc=CQXxJ}qh$-p*d TZ1Wl2p0Q_~OU`9y+L`_r&2xEZ diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py deleted file mode 100644 index 169adde..0000000 --- a/utils/segment/augmentations.py +++ /dev/null @@ -1,104 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Image augmentation functions -""" - -import math -import random - -import cv2 -import numpy as np - -from ..augmentations import box_candidates -from ..general import resample_segments, segment2box - - -def mixup(im, labels, segments, im2, labels2, segments2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - im = (im * r + im2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - segments = np.concatenate((segments, segments2), 0) - return im, labels, segments - - -def 
random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = im.shape[0] + border[0] * 2 # shape(h,w,c) - width = im.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -im.shape[1] / 2 # x translation (pixels) - C[1, 2] = -im.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) - T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # 
ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - new_segments = [] - if n: - new = np.zeros((n, 4)) - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - new_segments.append(xy) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) - targets = targets[i] - targets[:, 1:5] = new[i] - new_segments = np.array(new_segments)[i] - - return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py deleted file mode 100644 index a63d6ec..0000000 --- a/utils/segment/dataloaders.py +++ /dev/null @@ -1,330 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Dataloaders -""" - -import os -import random - -import cv2 -import numpy as np -import torch -from torch.utils.data import DataLoader, distributed - -from ..augmentations import augment_hsv, copy_paste, letterbox -from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker -from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn -from ..torch_utils import torch_distributed_zero_first -from .augmentations import mixup, random_perspective - -RANK = int(os.getenv('RANK', -1)) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - mask_downsample_ratio=1, - overlap_mask=False): - if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') - shuffle = False - with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = 
LoadImagesAndLabelsAndMasks( - path, - imgsz, - batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix, - downsample_ratio=mask_downsample_ratio, - overlap=overlap_mask) - - batch_size = min(batch_size, len(dataset)) - nd = torch.cuda.device_count() # number of CUDA devices - nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) - loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) - return loader( - dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, - worker_init_fn=seed_worker, - generator=generator, - ), dataset - - -class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing - - def __init__( - self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0, - prefix="", - downsample_ratio=1, - overlap=False, - ): - super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, prefix) - self.downsample_ratio = downsample_ratio - self.overlap = overlap - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - masks = [] - if mosaic: - # Load mosaic - img, labels, segments = self.load_mosaic(index) - shapes = None - 
- # MixUp augmentation - if random.random() < hyp["mixup"]: - img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy - segments = self.segments[index].copy() - if len(segments): - for i_s in range(len(segments)): - segments[i_s] = xyn2xy( - segments[i_s], - ratio[0] * w, - ratio[1] * h, - padw=pad[0], - padh=pad[1], - ) - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels, segments = random_perspective(img, - labels, - segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) - if self.overlap: - masks, sorted_idx = polygons2masks_overlap(img.shape[:2], - segments, - downsample_ratio=self.downsample_ratio) - masks = masks[None] # (640, 640) -> (1, 640, 640) - labels = labels[sorted_idx] - else: - masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) - - masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // - self.downsample_ratio, img.shape[1] // - self.downsample_ratio)) - # TODO: albumentations support - if self.augment: - # Albumentations - # there are some augmentation that won't change boxes and masks, - # so 
just be it for now. - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) - - # Flip up-down - if random.random() < hyp["flipud"]: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - masks = torch.flip(masks, dims=[1]) - - # Flip left-right - if random.random() < hyp["fliplr"]: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - masks = torch.flip(masks, dims=[2]) - - # Cutouts # labels = cutout(img, labels, p=0.5) - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) - - def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic - labels4, segments4 = [], [] - s = self.img_size - yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y - - # 3 additional image indices - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = self.load_image(index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 
0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - labels, segments = self.labels[index].copy(), self.segments[index].copy() - - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) - img4, labels4, segments4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], - border=self.mosaic_border) # border to remove - return img4, labels4, segments4 - - @staticmethod - def collate_fn(batch): - img, label, path, shapes, masks = zip(*batch) # transposed - batched_masks = torch.cat(masks, 0) - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks - - -def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. - polygons (np.ndarray): [N, M], N is the number of polygons, - M is the number of points(Be divided by 2). 
- """ - mask = np.zeros(img_size, dtype=np.uint8) - polygons = np.asarray(polygons) - polygons = polygons.astype(np.int32) - shape = polygons.shape - polygons = polygons.reshape(shape[0], -1, 2) - cv2.fillPoly(mask, polygons, color=color) - nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) - # NOTE: fillPoly firstly then resize is trying the keep the same way - # of loss calculation when mask-ratio=1. - mask = cv2.resize(mask, (nw, nh)) - return mask - - -def polygons2masks(img_size, polygons, color, downsample_ratio=1): - """ - Args: - img_size (tuple): The image size. - polygons (list[np.ndarray]): each polygon is [N, M], - N is the number of polygons, - M is the number of points(Be divided by 2). - """ - masks = [] - for si in range(len(polygons)): - mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) - masks.append(mask) - return np.array(masks) - - -def polygons2masks_overlap(img_size, segments, downsample_ratio=1): - """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), - dtype=np.int32 if len(segments) > 255 else np.uint8) - areas = [] - ms = [] - for si in range(len(segments)): - mask = polygon2mask( - img_size, - [segments[si].reshape(-1)], - downsample_ratio=downsample_ratio, - color=1, - ) - ms.append(mask) - areas.append(mask.sum()) - areas = np.asarray(areas) - index = np.argsort(-areas) - ms = np.array(ms)[index] - for i in range(len(segments)): - mask = ms[i] * (i + 1) - masks = masks + mask - masks = np.clip(masks, a_min=0, a_max=i + 1) - return masks, index diff --git a/utils/segment/general.py b/utils/segment/general.py deleted file mode 100644 index 43bdc46..0000000 --- a/utils/segment/general.py +++ /dev/null @@ -1,134 +0,0 @@ -import cv2 -import numpy as np -import torch -import torch.nn.functional as F - - -def crop_mask(masks, boxes): - """ - "Crop" predicted masks by zeroing out everything not in the predicted 
bbox. - Vectorized by Chong (thanks Chong). - - Args: - - masks should be a size [h, w, n] tensor of masks - - boxes should be a size [n, 4] tensor of bbox coords in relative point form - """ - - n, h, w = masks.shape - x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) - r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) - c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) - - return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) - - -def process_mask_upsample(protos, masks_in, bboxes, shape): - """ - Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - masks = crop_mask(masks, bboxes) # CHW - return masks.gt_(0.5) - - -def process_mask(protos, masks_in, bboxes, shape, upsample=False): - """ - Crop before upsample. 
- proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - ih, iw = shape - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW - - downsampled_bboxes = bboxes.clone() - downsampled_bboxes[:, 0] *= mw / iw - downsampled_bboxes[:, 2] *= mw / iw - downsampled_bboxes[:, 3] *= mh / ih - downsampled_bboxes[:, 1] *= mh / ih - - masks = crop_mask(masks, downsampled_bboxes) # CHW - if upsample: - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - return masks.gt_(0.5) - - -def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): - """ - img1_shape: model input shape, [h, w] - img0_shape: origin pic shape, [h, w, 3] - masks: [h, w, num] - """ - # Rescale coordinates (xyxy) from im1_shape to im0_shape - if ratio_pad is None: # calculate from im0_shape - gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new - pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding - else: - pad = ratio_pad[1] - top, left = int(pad[1]), int(pad[0]) # y, x - bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) - - if len(masks.shape) < 2: - raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') - masks = masks[top:bottom, left:right] - # masks = masks.permute(2, 0, 1).contiguous() - # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] - # masks = masks.permute(1, 2, 0).contiguous() - masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) - - if len(masks.shape) == 2: - masks = masks[:, :, None] - return masks - - -def mask_iou(mask1, mask2, eps=1e-7): - """ - mask1: [N, n] m1 means number of predicted objects - mask2: [M, n] m2 means number of gt objects - Note: n means 
image_w x image_h - - return: masks iou, [N, M] - """ - intersection = torch.matmul(mask1, mask2.t()).clamp(0) - union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks_iou(mask1, mask2, eps=1e-7): - """ - mask1: [N, n] m1 means number of predicted objects - mask2: [N, n] m2 means number of gt objects - Note: n means image_w x image_h - - return: masks iou, (N, ) - """ - intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) - union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks2segments(masks, strategy='largest'): - # Convert masks(n,160,160) into segments(n,xy) - segments = [] - for x in masks.int().cpu().numpy().astype('uint8'): - c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) - segments.append(c.astype('float32')) - return segments diff --git a/utils/segment/loss.py b/utils/segment/loss.py deleted file mode 100644 index b45b2c2..0000000 --- a/utils/segment/loss.py +++ /dev/null @@ -1,186 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..general import xywh2xyxy -from ..loss import FocalLoss, smooth_BCE -from ..metrics import bbox_iou -from ..torch_utils import de_parallel -from .general import crop_mask - - -class ComputeLoss: - # Compute losses - def __init__(self, model, autobalance=False, overlap=False): - self.sort_obj_iou = False - self.overlap = overlap - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - self.device = device - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - 
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - m = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance - self.na = m.na # number of anchors - self.nc = m.nc # number of classes - self.nl = m.nl # number of layers - self.nm = m.nm # number of masks - self.anchors = m.anchors - self.device = device - - def __call__(self, preds, targets, masks): # predictions, targets, model - p, proto = preds - bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width - lcls = torch.zeros(1, device=self.device) - lbox = torch.zeros(1, device=self.device) - lobj = torch.zeros(1, device=self.device) - lseg = torch.zeros(1, device=self.device) - tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - - n = b.shape[0] # number of targets - if n: - pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions - - # Box regression - pxy = pxy.sigmoid() * 2 - 0.5 - pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # 
Objectness - iou = iou.detach().clamp(0).type(tobj.dtype) - if self.sort_obj_iou: - j = iou.argsort() - b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] - if self.gr < 1: - iou = (1.0 - self.gr) + self.gr * iou - tobj[b, a, gj, gi] = iou # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(pcls, self.cn, device=self.device) # targets - t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(pcls, t) # BCE - - # Mask regression - if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] - marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized - mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) - for bi in b.unique(): - j = b == bi # matching index - if self.overlap: - mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) - else: - mask_gti = masks[tidxs[i]][j] - lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs - - loss = lbox + lobj + lcls + lseg - return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() - - def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): - # Mask loss for one image - pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") - return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input 
targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] - gain = torch.ones(8, device=self.device) # normalized to gridspace gain - ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - if self.overlap: - batch = p[0].shape[0] - ti = [] - for i in range(batch): - num = (targets[:, 0] == i).sum() # find number of targets of each image - ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) - ti = torch.cat(ti, 1) # (na, nt) - else: - ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) - targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets - - for i in range(self.nl): - anchors, shape = self.anchors[i], p[i].shape - gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain # shape(3,n,7) - if nt: - # Matches - r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1 < g) & (gxy > 1)).T - l, m = ((gxi % 1 < g) & (gxi > 1)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors - (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class - 
gij = (gxy - offsets).long() - gi, gj = gij.T # grid indices - - # Append - indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - tidxs.append(tidx) - xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized - - return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py deleted file mode 100644 index b09ce23..0000000 --- a/utils/segment/metrics.py +++ /dev/null @@ -1,210 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Model validation metrics -""" - -import numpy as np - -from ..metrics import ap_per_class - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] - return (x[:, :8] * w).sum(1) - - -def ap_per_class_box_and_mask( - tp_m, - tp_b, - conf, - pred_cls, - target_cls, - plot=False, - save_dir=".", - names=(), -): - """ - Args: - tp_b: tp of boxes. - tp_m: tp of masks. - other arguments see `func: ap_per_class`. - """ - results_boxes = ap_per_class(tp_b, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix="Box")[2:] - results_masks = ap_per_class(tp_m, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix="Mask")[2:] - - results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} - return results - - -class Metric: - - def __init__(self) -> None: - self.p = [] # (nc, ) - self.r = [] # (nc, ) - self.f1 = [] # (nc, ) - self.all_ap = [] # (nc, 10) - self.ap_class_index = [] # (nc, ) - - @property - def ap50(self): - """AP@0.5 of all classes. 
- Return: - (nc, ) or []. - """ - return self.all_ap[:, 0] if len(self.all_ap) else [] - - @property - def ap(self): - """AP@0.5:0.95 - Return: - (nc, ) or []. - """ - return self.all_ap.mean(1) if len(self.all_ap) else [] - - @property - def mp(self): - """mean precision of all classes. - Return: - float. - """ - return self.p.mean() if len(self.p) else 0.0 - - @property - def mr(self): - """mean recall of all classes. - Return: - float. - """ - return self.r.mean() if len(self.r) else 0.0 - - @property - def map50(self): - """Mean AP@0.5 of all classes. - Return: - float. - """ - return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 - - @property - def map(self): - """Mean AP@0.5:0.95 of all classes. - Return: - float. - """ - return self.all_ap.mean() if len(self.all_ap) else 0.0 - - def mean_results(self): - """Mean of results, return mp, mr, map50, map""" - return (self.mp, self.mr, self.map50, self.map) - - def class_result(self, i): - """class-aware result, return p[i], r[i], ap50[i], ap[i]""" - return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) - - def get_maps(self, nc): - maps = np.zeros(nc) + self.map - for i, c in enumerate(self.ap_class_index): - maps[c] = self.ap[i] - return maps - - def update(self, results): - """ - Args: - results: tuple(p, r, ap, f1, ap_class) - """ - p, r, all_ap, f1, ap_class_index = results - self.p = p - self.r = r - self.all_ap = all_ap - self.f1 = f1 - self.ap_class_index = ap_class_index - - -class Metrics: - """Metric for boxes and masks.""" - - def __init__(self) -> None: - self.metric_box = Metric() - self.metric_mask = Metric() - - def update(self, results): - """ - Args: - results: Dict{'boxes': Dict{}, 'masks': Dict{}} - """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) - - def mean_results(self): - return self.metric_box.mean_results() + self.metric_mask.mean_results() - - def class_result(self, i): - return 
self.metric_box.class_result(i) + self.metric_mask.class_result(i) - - def get_maps(self, nc): - return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) - - @property - def ap_class_index(self): - # boxes and masks have the same ap_class_index - return self.metric_box.ap_class_index - - -KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] - -BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py deleted file mode 100644 index 9b90900..0000000 --- a/utils/segment/plots.py +++ /dev/null @@ -1,143 +0,0 @@ -import contextlib -import math -from pathlib import Path - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import torch - -from .. import threaded -from ..general import xywh2xyxy -from ..plots import Annotator, colors - - -@threaded -def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if isinstance(masks, torch.Tensor): - masks = masks.cpu().numpy().astype(int) - - max_size = 1920 # max image size - max_subplots = 16 # max image subplots, i.e. 
4x4 - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - idx = targets[:, 0] == i - ti = targets[idx] # image targets - - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else 
f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - - # Plot masks - if len(masks): - if masks.max() > 1.0: # mean that masks are overlap - image_masks = masks[[i]] # (1, 640, 640) - nl = len(ti) - index = np.arange(nl).reshape(nl, 1, 1) + 1 - image_masks = np.repeat(image_masks, nl, axis=0) - image_masks = np.where(image_masks == index, 1.0, 0.0) - else: - image_masks = masks[idx] - - im = np.asarray(annotator.im).copy() - for j, box in enumerate(boxes.T.tolist()): - if labels or conf[j] > 0.25: # 0.25 conf thresh - color = colors(classes[j]) - mh, mw = image_masks[j].shape - if mh != h or mw != w: - mask = image_masks[j].astype(np.uint8) - mask = cv2.resize(mask, (w, h)) - mask = mask.astype(bool) - else: - mask = image_masks[j].astype(bool) - with contextlib.suppress(Exception): - im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 - annotator.fromarray(im) - annotator.im.save(fname) # save - - -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
- for f in files: - try: - data = pd.read_csv(f) - index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + - 0.1 * data.values[:, 11]) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): - y = data.values[:, j] - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) - if best: - # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") - else: - # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") - ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) - plt.close() diff --git a/utils/triton.py b/utils/triton.py deleted file mode 100644 index a94ef0a..0000000 --- a/utils/triton.py +++ /dev/null @@ -1,85 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" Utils to interact with the Triton Inference Server -""" - -import typing -from urllib.parse import urlparse - -import torch - - -class TritonRemoteModel: - """ A wrapper over a model served by the Triton Inference Server. It can - be configured to communicate over GRPC or HTTP. It accepts Torch Tensors - as input and returns them as outputs. - """ - - def __init__(self, url: str): - """ - Keyword arguments: - url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 - """ - - parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": - from tritonclient.grpc import InferenceServerClient, InferInput - - self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client - model_repository = self.client.get_model_repository_index() - self.model_name = model_repository.models[0].name - self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) - - def create_input_placeholders() -> typing.List[InferInput]: - return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] - - else: - from tritonclient.http import InferenceServerClient, InferInput - - self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client - model_repository = self.client.get_model_repository_index() - self.model_name = model_repository[0]['name'] - self.metadata = self.client.get_model_metadata(self.model_name) - - def create_input_placeholders() -> typing.List[InferInput]: - return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] - - self._create_input_placeholders_fn = create_input_placeholders - - @property - def runtime(self): - """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) - - def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: - """ Invokes the model. Parameters can be provided via args or kwargs. - args, if provided, are assumed to match the order of inputs of the model. - kwargs are matched with the model input names. 
- """ - inputs = self._create_inputs(*args, **kwargs) - response = self.client.infer(model_name=self.model_name, inputs=inputs) - result = [] - for output in self.metadata['outputs']: - tensor = torch.as_tensor(response.as_numpy(output['name'])) - result.append(tensor) - return result[0] if len(result) == 1 else result - - def _create_inputs(self, *args, **kwargs): - args_len, kwargs_len = len(args), len(kwargs) - if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") - if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") - - placeholders = self._create_input_placeholders_fn() - if args_len: - if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") - for input, value in zip(placeholders, args): - input.set_data_from_numpy(value.cpu().numpy()) - else: - for input in placeholders: - value = kwargs[input.name] - input.set_data_from_numpy(value.cpu().numpy()) - return placeholders From cd5d1fa264d8d7566e7992600a8429d479d26831 Mon Sep 17 00:00:00 2001 From: Giannis Pastaltzidis Date: Sun, 6 Nov 2022 23:08:50 +0200 Subject: [PATCH 4/4] [Fix]: Address the requested changes --- SimpleHigherHRNet.py | 113 +--- export.py | 623 ------------------ misc/__pycache__/HeatmapParser.cpython-38.pyc | Bin 0 -> 7615 bytes misc/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 154 bytes misc/__pycache__/utils.cpython-38.pyc | Bin 0 -> 18196 bytes misc/__pycache__/visualization.cpython-38.pyc | Bin 0 -> 9214 bytes misc/utils.py | 21 +- scripts/export_model.py | 260 ++++++++ scripts/live-demo.py | 9 +- utils/__init__.py | 71 -- .../__pycache__/augmentations.cpython-38.pyc | Bin 13747 -> 0 bytes utils/__pycache__/autoanchor.cpython-38.pyc | Bin 6508 -> 0 bytes utils/__pycache__/downloads.cpython-38.pyc | Bin 5324 -> 0 bytes utils/__pycache__/general.cpython-38.pyc | Bin 37812 -> 0 bytes utils/__pycache__/metrics.cpython-38.pyc | Bin 11600 -> 0 bytes 
utils/__pycache__/plots.cpython-38.pyc | Bin 21672 -> 0 bytes utils/yolov5/__init__.py | 71 ++ .../__pycache__/__init__.cpython-38.pyc | Bin 2301 -> 2298 bytes .../__pycache__/dataloaders.cpython-38.pyc | Bin 41487 -> 41487 bytes .../yolov5/__pycache__/general.cpython-38.pyc | Bin 0 -> 37819 bytes .../__pycache__/torch_utils.cpython-38.pyc | Bin 16799 -> 16790 bytes utils/{ => yolov5}/dataloaders.py | 4 +- utils/{ => yolov5}/general.py | 2 +- utils/{ => yolov5}/torch_utils.py | 2 +- 24 files changed, 357 insertions(+), 819 deletions(-) delete mode 100644 export.py create mode 100644 misc/__pycache__/HeatmapParser.cpython-38.pyc create mode 100644 misc/__pycache__/__init__.cpython-38.pyc create mode 100644 misc/__pycache__/utils.cpython-38.pyc create mode 100644 misc/__pycache__/visualization.cpython-38.pyc create mode 100644 scripts/export_model.py delete mode 100644 utils/__pycache__/augmentations.cpython-38.pyc delete mode 100644 utils/__pycache__/autoanchor.cpython-38.pyc delete mode 100644 utils/__pycache__/downloads.cpython-38.pyc delete mode 100644 utils/__pycache__/general.cpython-38.pyc delete mode 100644 utils/__pycache__/metrics.cpython-38.pyc delete mode 100644 utils/__pycache__/plots.cpython-38.pyc create mode 100644 utils/yolov5/__init__.py rename utils/{ => yolov5}/__pycache__/__init__.cpython-38.pyc (81%) rename utils/{ => yolov5}/__pycache__/dataloaders.cpython-38.pyc (63%) create mode 100644 utils/yolov5/__pycache__/general.cpython-38.pyc rename utils/{ => yolov5}/__pycache__/torch_utils.cpython-38.pyc (70%) rename utils/{ => yolov5}/dataloaders.py (99%) rename utils/{ => yolov5}/general.py (99%) rename utils/{ => yolov5}/torch_utils.py (99%) diff --git a/SimpleHigherHRNet.py b/SimpleHigherHRNet.py index 7eced8a..85a7ada 100644 --- a/SimpleHigherHRNet.py +++ b/SimpleHigherHRNet.py @@ -9,83 +9,8 @@ from misc.HeatmapParser import HeatmapParser from misc.utils import get_multi_scale_size, resize_align_multi_scale, get_multi_stage_outputs, 
aggregate_results, get_final_preds, bbox_iou,TRTModule_hrnet from collections import OrderedDict,namedtuple -# from cuda import cuda, nvrtc - -# class HostDeviceMem(object): -# def __init__(self, host_mem, device_mem): -# self.host = host_mem -# self.device = device_mem - -# def __str__(self): -# return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device) - -# def __repr__(self): -# return self.__str__() -# class TrtModel: - -# def __init__(self,engine_path,max_batch_size=1,dtype=np.float32): - -# self.engine_path = engine_path -# self.dtype = dtype -# self.logger = trt.Logger(trt.Logger.WARNING) -# self.runtime = trt.Runtime(self.logger) -# self.engine = self.load_engine(self.runtime, self.engine_path) -# self.max_batch_size = max_batch_size -# self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers() -# self.context = self.engine.create_execution_context() - - - -# @staticmethod -# def load_engine(trt_runtime, engine_path): -# trt.init_libnvinfer_plugins(None, "") -# with open(engine_path, 'rb') as f: -# engine_data = f.read() -# engine = trt_runtime.deserialize_cuda_engine(engine_data) -# return engine - -# def allocate_buffers(self): - -# inputs = [] -# outputs = [] -# bindings = [] -# # stream = cuda.Stream() -# err, stream = cuda.cuStreamCreate(0) - -# for binding in self.engine: -# size = trt.volume(self.engine.get_binding_shape(binding)) * self.max_batch_size -# err, dXclass = cuda.cuMemAlloc(size) -# err, dYclass = cuda.cuMemAlloc(size) -# err, dOutclass = cuda.cuMemAlloc(size) -# host_mem = cuda.pagelocked_empty(size, self.dtype) -# device_mem = cuda.mem_alloc(host_mem.nbytes) - -# bindings.append(int(device_mem)) - -# if self.engine.binding_is_input(binding): -# inputs.append(HostDeviceMem(host_mem, device_mem)) -# else: -# outputs.append(HostDeviceMem(host_mem, device_mem)) - -# return inputs, outputs, bindings, stream - - -# def __call__(self,x:np.ndarray,batch_size=2): - -# x = x.astype(self.dtype) - -# 
np.copyto(self.inputs[0].host,x.ravel()) - -# for inp in self.inputs: -# cuda.memcpy_htod_async(inp.device, inp.host, self.stream) - -# self.context.execute_async(batch_size=batch_size, bindings=self.bindings, stream_handle=self.stream.handle) -# for out in self.outputs: -# cuda.memcpy_dtoh_async(out.host, out.device, self.stream) - - -# self.stream.synchronize() -# return [out.host.reshape(batch_size,-1) for out in self.outputs] + + class SimpleHigherHRNet: """ SimpleHigherHRNet class. @@ -107,7 +32,7 @@ def __init__(self, max_nof_people=30, max_batch_size=32, device=torch.device("cpu"), - trt_=False): + enable_tensorrt=False): """ Initializes a new SimpleHigherHRNet object. HigherHRNet is initialized on the torch.device("device") and @@ -151,7 +76,7 @@ def __init__(self, self.max_nof_people = max_nof_people self.max_batch_size = max_batch_size self.device = device - self.trt_=trt_ + self.enable_tensorrt=enable_tensorrt # assert nof_joints in (14, 15, 17) if self.nof_joints == 14: @@ -168,31 +93,7 @@ def __init__(self, else: raise ValueError('Wrong model name.') - # checkpoint = torch.load(checkpoint_path, map_location=self.device) - # if 'model' in checkpoint: - # checkpoint = checkpoint['model'] - # # fix issue with official high-resolution weights - # checkpoint = OrderedDict([(k[2:] if k[:2] == '1.' 
else k, v) for k, v in checkpoint.items()]) - # self.model.load_state_dict(checkpoint) - - # if 'cuda' in str(self.device): - # print("device: 'cuda' - ", end="") - - # if 'cuda' == str(self.device): - # # if device is set to 'cuda', all available GPUs will be used - # print("%d GPU(s) will be used" % torch.cuda.device_count()) - # device_ids = None - # else: - # # if device is set to 'cuda:IDS', only that/those device(s) will be used - # print("GPU(s) '%s' will be used" % str(self.device)) - # device_ids = [int(x) for x in str(self.device)[5:].split(',')] - - # self.model = torch.nn.DataParallel(self.model, device_ids=device_ids) - # elif 'cpu' == str(self.device): - # print("device: 'cpu'") - # else: - # raise ValueError('Wrong device name.') - if not trt_: + if not self.enable_tensorrt: checkpoint = torch.load(checkpoint_path, map_location=self.device) if 'model' in checkpoint: checkpoint = checkpoint['model'] @@ -219,10 +120,8 @@ def __init__(self, self.model = self.model.to(device) self.model.eval() else: - # import pycuda.driver as cuda - # self.model = TrtModel('pose_higher_hrnet_w32_512.engine') if device.type == 'cpu': - device = torch.device('cuda:0') + raise ValueError('TensorRT does not support cpu device.') self.model=TRTModule_hrnet(path=checkpoint_path,device=self.device) self.output_parser = HeatmapParser(num_joints=self.nof_joints, diff --git a/export.py b/export.py deleted file mode 100644 index c4b66da..0000000 --- a/export.py +++ /dev/null @@ -1,623 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ -PaddlePaddle | `paddle` | yolov5s_paddle_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - -Usage: - $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... - -Inference: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle - -TensorFlow.js: - $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model - $ npm start -""" - -import argparse -import json -import os -import platform -import re -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -# from models.experimental import attempt_load -# from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel -from utils.dataloaders import LoadImages -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) -from utils.torch_utils import select_device, smart_inference_mode -from models.higherhrnet import HigherHRNet - -MACOS = platform.system() == 'Darwin' # macOS environment - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] - return 
pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def try_export(inner_func): - # YOLOv5 export decorator, i..e @try_export - inner_args = get_default_args(inner_func) - - def outer_func(*args, **kwargs): - prefix = inner_args['prefix'] - try: - with Profile() as dt: - f, model = inner_func(*args, **kwargs) - LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') - return f, model - except Exception as e: - LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') - return None, None - - return outer_func - - -@try_export -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), _extra_files=extra_files) - return f, None - - -@try_export -def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): - # YOLOv5 ONNX export - check_requirements('onnx') - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - output_names = ['output0', 'output1'] #if isinstance(model, SegmentationModel) else ['output0'] - if dynamic: - dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) - if isinstance(model, SegmentationModel): - dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) - elif isinstance(model, DetectionModel): - 
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - do_constant_folding=True, - input_names=['images'], - output_names=output_names, - dynamic_axes=dynamic or None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - return f, model_onnx - - -@try_export -def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): - # YOLOv5 Paddle export - 
check_requirements(('paddlepaddle', 'x2paddle')) - import x2paddle - from x2paddle.convert import pytorch2paddle - - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(file).replace('.pt', f'_paddle_model{os.sep}') - - pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml - return f, None - - -@try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - check_requirements('coremltools') - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if MACOS: # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - return f, ct_model - - -@try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=2, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - # grid = model.model[-1].anchor_grid - # model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - # export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - # model.model[-1].anchor_grid = grid - export_onnx(model, im, file, 12, dynamic, simplify) - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - for inp in inputs: - LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: 
- LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') - if builder.platform_has_fast_fp16 : - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - return f, None - - -@try_export -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - except Exception: - check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - 
keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( - tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - return f, keras_model - - -@try_export -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - return f, None - - -@try_export -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - 
converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - return f, None - - -@try_export -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - return f, None - - -@try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - check_requirements('tensorflowjs') - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - 
r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - return f, None - - -@smart_inference_mode() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'pose_higher_hrnet_w32_512', # weights path - imgsz=(512, 960), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. 
use either --half or --dynamic but not both' - # model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - model = HigherHRNet(32,17) - model.load_state_dict(torch.load(weights)) - model.cuda() - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu' - - # Input - # # gs = int(max(model.stride)) # grid size (max stride) - # imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, 512,960).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.eval() - # for k, m in model.named_modules(): - # if isinstance(m, Detect): - # m.inplace = inplace - # m.dynamic = dynamic - # m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - # shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - # print(y) - # shape = y.shape - # metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata - # LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * len(fmts) # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: # TorchScript - f[0], _ = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) - if xml: # OpenVINO - f[3], _ = export_openvino(file, metadata, half) - if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) - if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - assert not tflite 
or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], s_model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(s_model, file) - if tflite or edgetpu: - f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) - if tfjs: - f[9], _ = export_tfjs(file) - if paddle: # PaddlePaddle - f[10], _ = export_paddle(model, im, file, metadata) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type - dir = Path('segment' if seg else 'classify' if cls else '') - h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" - f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") - return f # return list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', 
help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'weights/pose_higher_hrnet_w32_512.pth', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[512, 960], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=1, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument( - '--include', - nargs='+', - 
default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/misc/__pycache__/HeatmapParser.cpython-38.pyc b/misc/__pycache__/HeatmapParser.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9a14dc7521bfbac809f43db6b943f61b253a9c3 GIT binary patch literal 7615 zcmb7J-ESP%b-(w{$IgCoxuhtPlAW!i##)DwV%fC?m5|*=b)29k3|Rr%g_Xr=Eg ze#~Od+_`t|xgY1;bAIRS_lm_F!}Z|fyUibeow0wTm$R3Rm$z|8zYijrJFzitK)Vv^$g0kbRMy~_iZL!nR&!yCdeLG^_=7%u$p%aj%=gG)4U8? z{;(6vuo>gE_%Muyeka`by95q{pK^KiWvMCcdl>syTSF%0ebkI>%NqMY(#Ni`J2J@DC`b8 z!8hIwH@5=y?%je^s0Ee!S=U4IvM+}z62$~+LpJ)Ywg zuI5l~QpMSeg!VS>=zWlY)wyI4$0eDQ`9~ZADaax=-j*dfho>XUavo1tR^$Sn8M!Dg z;OWUFc@fX7yeyaH3VP<`Dn#=_TAZ2X@jOOJ?PkB(KYkI#V}}R{=2(Dy_S3(j>mT0w z3@^C;{jGb=yeVo*W$K}&b{B<2NWSexg_!**B$u!TPawU9NGwoGQrV83wmZ%wcH$)N z5!HK11~i*^pt&Rqnon||1!-Z*_Wog1Zty=u0&s)oH@*xRi3d)2`ngwx!bepwT$ zg88J`rs{tr2mL6xw^1vmrQeAl8`8etQ>w4hTyNNIwENJ9D9!3;BMRcQL{g)sLC}Y& z(_-vzqO8oPsiVh&X7$qSd;aeG{eI_$gbw<<0~FH*8N@-825H1wP_M0iM;aEVuk*tw zXq?j2pkvBWtU?*o z^He{8s$xsjSLyRBM1GUV*NBj28&X0k^%{+^7sibSZB(=f!g2-Sa`UT9p8HEP3UlqG zQGMSW_vdI=Bp6_8W}JE_y^ z^!>PIsh6q7!Y0*iY;E`a{4K1PEXCwd<#-tfYkz)ry7*>hrnHS2+%)589n{#Fgtf&A zY~YHX;fL198rdT!vD((ijqSFBb{E=fwKLLMVHa6TB(Aidh{r6k#~#*YYAmCj#ptv*ab^J7^otqiq3Wwc=t@KEUa3SM=B|aIo8p$-LRK(LCDc+Zk}P zwi&+vQ8GWiAhXbx`RR(`cWC7;>~=CwyP)?$Q>t5LqY8K~B^B&5jN%h(w2&-}FCMY+ zGNiGHu~(i7#bSHBnk*b~SzKgO-mpX9j5UDELQ*liH@cupC9G^IxsWVv+vxv}mg)ki zgF4hMPbDwsNDE+wNy-cHC5(=d$Cq)xKsz!u8+sj=l1t;4WES!foc7a6F3~p$xs&;g z)aff3sAD=$Y6;{SPhA;244XksT-(5#CBtqL4Ko)Rj|JRO0R%%aY#ZGDiAfNt)9mzn z0kMkx=9WJC01(~;Pc+V&vGtDMeIWhYH^J&ZakKp!vo=7x-^PQz23c`!y05Kw`c1zR 
z-A2K5R^;<$5D86@EQOvX*|bgQ{{0t6JZ5d&V{r%zN3wu!aw(ozKNdSS_QcYx?mZLg z20Epg?I0Ln3sA2uoZSt4xD7vQ#6d6Wt7jq=*S?wZkd{XiB&>91dAapY7{vr;Zodb9 z5t&Sv3AX?G|7VdWENUX<13j^02}bGy$R_S;V=R963(Kdm^zz9$=;aqi+$`HH_KZnh z$0t}q&N$K8MX0yY_(R;&+1I|RNMq8x@x5lz+Qoen)n=!h{Xq32?fp_G+VbIlU!gu$ z(~lKNCv}wH+YGd?ORY}OORfEIklJA!bW@wQEw#6Uy-2%F;{t!1x@CvGXlED%`$1~M z<95|+H0J9>$XSAIf7p{6o?)=TFm(V027B6_0z(c4K~G{vP|R(3=n!n&ut%_>cG>tW zTl%qIH*0I?^8v^J(Qc;>^o}0z_l<2uYprL@j%};kG`1DW-PFd%QQb9rlfwB$+o^@R z)X`O`g;muHo0#SRu#}c~`bJ$-TZXMxJx{bjD-0y?AH78WLzJ@wZ8W9@Xh3noDRl;Z zsG`&YWWvIMWGofL>U*>?gdL#5zeVM55qXP<#u1e-IbUdu_CXl0h#Zi_i^Ai}qRee_ z(kOd;3AZOoT!~m0vrDCN1;i zBF%7dQ(g26kSX?~*o{l_D3&fEb{pBcWTE8fl8us!5=97zDUU3Q_}YjH+O|PZ@SmB- z@Px3p6K?tmUWuDHiwu42$AmSU=u@<~lCWAv0*6t3?6tF>F!(rO5Lleky)i1GFCY+G z`)`jh(;^$^kAR{sv5^NO=Pm(}(Q}oJ3-GTw;)UoW2VSOE9uZ4E$;jMaTR>$6XheQP z8-Hz9wKr7Pup!5v<5I7pG@~zM!YC-z`a-@!Ti92~!HoGKxfOzbXK2D(Hi6Vbb~JG| z&%}lv2GNdoF7BouBPLa3jNw?U5N7QAD4)@@xAFQnxQ!_m;`9A)%*r|L4;tHFbdx6@ zy|xvp{V)tZf)VQvwvFkf$TD?<-GPt!=xBy!tL_nbmk7nWu-N_H zCK$a;HON>nJ#`bJgQmH4 zwB5{xU77q!upm{~sfw^+T?)@_Tl~_^mSKGgqGAb-->+Qv{E;}rb>>LWm?T*;l+hco z3o)RAhWq^f*Pze{E>L4nYAUF45N1HtbPc=zoRl76fky#}6x+61Lm8np0dKTrp^QAq z9dOX>0T&1rpny0+0E-#G$6``U@-SBjpf3W>w*isC$696w8usc|n_8E_-PB=>ze}>x zeiKUH^Qd%_0+J%Qj zFzl&!@EKXe-CD``WXtbGTC3{b14_pn!v4St#!zeZ<=V#~=RYBkY^{)W;R1pRK23(mZG=&Si%=cGkPv2Q4 zGY*OpH5uGmr+y0C0eul|3h+3>zE_ZV!4LLghLXJ)iA@SmDR@P|wF0Auu;dN}w+L2k zk|V+}TdmVOX`8f3%f=Q3Jx3hc=8ll88N1LgNG@?H{GtG4XB}Tq8b`D}={ynbEE#N> zK{(`L#O&;dPx<{8nS6|SW`@KI+5yhzYqh>lR$~G_X)mxg^htV85Y#SEc$`Bxg7Ei< zBakcTP*m$aYGIDyAad9rq0f?=xZ@&Xj1mlaDalOBbEv1_$weJKGYFax5Yw)V$_Q7p zI1gykPu&)mlaeezm%&>i43~xJsY1_qo?3CL01=1>f=I4@20lgNgY$qA&V!4Ymy@|E zA3B|yj~N-Q^U|VreXjIuG^gj7331AR6H4HOL;)m&87AcxoGPU9w^%)D;a1_SU^}u1 zI)4IL@H5h{LlW7u(#JQqX7oYm8u~`-gFfw~FOxT%-D&@memcYi7o6LY|6LfpX)f;D z;lrR;rCd|!cdCG^14_762P&X20co;mt@=?=MGmVMhpoLTlBp4;^$h-43)qbl=D79wuim(+N$q?;0OS8tziQa3_WE(P)d>gZd7R8W`ky$x zUOUTTt=d5jxrzv|8b;NtyKB|GHCjVer=HiUn3gG?nJ`fIQ1xz2N7pD~0xc-UR@Xu9 
zDGGQstWXI`Gife@wghQEz#UO!gA*xEywLqs05UNKXiBUleU_=T%0_Ib5YtzHw*+ua zdd^Bc6+8@()0T?ZA(R;+Ct9;9jV^=TiK*=mV;xtXH;ct#Pa){&AlHbGe@^ou1i=93 z3%o=?{nc3usDf75qbw-WxuSm1{s>%Z74;oF^c*yl0PH}9U!x&K*L#dEQ43+{mO(zB21jr(WH%T1QD)^o)Bt`W*VUO?<@>2Cx4F#vyK)30S?M7)nREQcR}g))7BBI-Nu z;LU9b*du2)P$S?yJsefMXyCm}k;epXBVK?DL{e&!=IG+18rPbmvGqBL>HNkDOx`rV zvH$X{Qp0pb!KpT%=5$W6O)aOkF^r+;kx%~%ie4v?p Y*?dxO-7NAp4X^W{w5=3y6fDpCZ(^kY)c^nh literal 0 HcmV?d00001 diff --git a/misc/__pycache__/__init__.cpython-38.pyc b/misc/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74e81f1481199e06adf099b41c36c8664efd375d GIT binary patch literal 154 zcmWIL<>g`kf~3})E}%J3!?>^k1@9)6@NRPft(x_wKLnW94$e!Y_YpsWrD?S>Iw|@|Qv3 zIb7kJwq+?x**&Xn?snalvQu~E?$tfqonB_yulvi{de%1Wa`n7=Ub)J9$F3JtM)~hp z^`gqE9PXvS?UwPB)pn4psQkN*vKAdxP(`$!QYBSJt7%nHQ@HO@(`paydzBSf-F;}c zSM5W)88xHs1N=Vqf;ymP0oku!REN}IVDMCs-x-yxF1wi^#JaN)G_rS?uXS& z>bQCcEsv-Z>R~|cSC>>xodo2ldRd)Tk0SR0^@@5-ok6au&Z@`J>H!&nt)5U%0(wk6 zrJe@#!4&$8`Vl~nt8?l{0ewiFS3idP3H7YHfcwKQTk1J=@f}ND47~1Kx3=!qPb&L$ ztM=+`&i#DNNs6y&73e^{*l9&cq2F8%RJ1be1ucgy(w~bv7w83C;VC4MwPD9r*NN?e zSgKhphjKRE4HqzP(_V98YavzvTx-?i09&HbsVbttB+Egk~jS%l>wu48*&N4pz zgRgYjOM!mn@_Z0Iy4(p{kFG?WUU+(VD=G3D8m52ZbLb(lysEAD;J$6nSmWc8IeQD4 z$inQ{>$bA5I^q5ZGq!HOZAX|hBJI1*nibnGTA#ea*(0ic%84D7IS4d8Xl;0#)*4%m z;J$%RK&0h)E8f-WymjqFY+)U;^S3#u)#nB)(QqYd@LPs~sxCFdYBZ=W1l6BhXhyB2 zM%Y;mPF4FW%Z=`!(~rVa)ujMa5}m5v=%{GvlbX2jo~_HcX|9!KzqL%__JY37A)iz( zhGC#1tm;cz5422Al4qcHJ?&5lmoXn81WW|C=ko0s`t zXtu7-pcqz=SoVIWoJPQ2D&cneG7n=Ce>fuVJ7(^)Iwo>y6xR1V};-chb#I{nz2u?|>q86=r5Fs9y$i-2v_QU1u{ zqb{fNSb{uLyuC8{oP5`|`b|47p{I(P=piahz6}Ug-l{!ltve_e__nQg;)#j(;E9R; zHX=7fd`m^I2Kt?TC6HkF{6zAVJ3;W+Lq?yih zTc~#XH&EF&4ZiW)HRm>E#cc`?O~gr`_{IhOFiN#@Qs{)8K3H!bjN9oClWbo#wQk-@ zyj~}alFU$p<0KgabB3KH+YX|J0zb$u_6E)9@v}*0HPC}FDYN2n4W>aRmAI|zXOq2v zU0hu3^n)u97vbVSFW06MzZLYOKtp!4n!O-#^&m=$DGMU6qfqHAq6^51dR`hezKPSAquWxF#nTPlR7sY#GBDON;{pPy!_SEL`kiz&!x1-ScnxX5@jZ%p5t#}` 
zX6NjRQ$mVAr(*8~Lz}jz!P<`C?}T&KKI{1QA!pXn&*Q0W;)c^c7SU%>Fd~}F1+0xM z%x$}ypusfnQhK z=Ku%p&5HywSV^?2xm#n_X9SSg&BR`S($LXR6LySOKaMsZ!({|I=3gM&2FTf^U=cx5 zM@DAY2E|w#804nyV6bbh0{?0|6df}6F1UltKlv5J4A?z6iqrv;)}a;n0z?h!yw@## z7~QAe0Icftf!%|!mH~1C=LFyNc{E9!u%9@suwEcs2jNO@*Oxtpb{uUe`Zq8tM{II5 z;oG=2<#raAeBU$3kW)igda=9it@+@V5S+0Og$3UTK10>TY*W2Qe9Nb;kMk?Pw}y*nfwS7jv^_eKG4HKuSs=6 zUj(e4CCTxD9g@py>A_H;MMxQBftoxli(~D#o$vb@%@uBuQbxaR-~68eh2jc%)TJcC z*N^k$icqY&BCO?(?_NYJNG_}EqqQ4lNAPtU%EK?Lc_~EZ3Xr$fs1kdaDQ6K<4QimL z{~(3f6A)kk+87Cgc3gpj7UZ0tw%uI=nFem|$Jtp662^@)u}3B5S{*WQqIB(**xgAi zMCw{T@hHPJ$y2CBlYxjLr}NAab&`xxz`Z`1YOW4W9jFk>c2k3`C3%9IJ>jMboIFhI zeqy)kCBmuMv30$Ymdr!D7-RwbnLHJbhj?YUABhESQL>ML8SaIKcSzXZw5|UVYV;hE zZ8S>y9MkA?D2%uuq#1bKh9hb;uoiRwjPOCsFXk9>kSaFoWA`vv3aun1v$gu;Vv}4B zq7~h*VwNaOU@Smi?hS6hf&tkr4SK42W2w^%s+~4IvD0r?uQz)uK?sdGTB8cAu5hYH1bhtC+3@G%~oq2 zPa0bHj3fcAlOmE_ySWmE9Y|iz(C9}38q19%AlnG|4@?$zyAJ&foD3}Ys-xecsj_ar z;jCrA4In`zZv!ud6^ow695u9DOxN{puT+A>SOGv zFS5kZEM`DE`PHw;I=%orT>(Q1s+jQ=-E7@rW}p>U9=6m+Uik4-(r{rVs;-2bQl>tO6@Z(NC=ay6IPqo z1KO;U1AtZ(Rv(738$N8_zBuRsN6~)aRux8a=Xyu2G<%~*I)QnQk(~h$LFBzY!JIJg z2avl>;;-4d#oLso^{;<|33+E-l(6OIO+()XqdgdTp)RovMehU)**8CkUZsf*i$4mK zF%5;1h2hYq6I`uLCBD(m^k3v#vP;Vi;ZPg|B!#*O8xag8u^WlKl-M`)C-|r#8Z1OT z+q~XxAOpDx)zI$jq?Y>vk|)-$F>GCBLs`SmqLYwv(aP?H3P8S7z~2$uyW`u=9nYaS zDLMOX&-S30&6*Y{YmH}GoU+l6f^^J?j-wD;Z+L6)jI6mJC=d@c8?fmZmL~`(6T^4` zdfBUAmX*J#gJyNG2)?mEtRYe@3~pBEnpNs9@PqV%uy(qTwrz*!BnKBubwS#lYxSC8 zc<#;q@N}wPzgauC^(=5fJTFu~t8+IS%botI>Me6asd-aM&6_pz2E+O$ULs8DT=n_M zu{AMdGy9KDMC zHZIRv%BmM|g?|Uhh|80cQ#T>M5j~ftzVLUZ0(H~2l&m(bN`ZQSvMqQQphyLp!9qD# zt#FAF^{NF92+f)Lo#=RUbot6&%VILEE?QzaA#K~x{GcafU}5?Z78V>dOW8~#z2-MR z4=j<@dX-vDh~=c`Yaxhk1OYm_0YA*jV7%mD9CL*Id6vD=tj_(=7M{4opDfG553ytU z_FWitw}GF6!w?2vZ40?Mpl#FE>nwL7utG3agMVW>VAD&~NTPE;5W`Q&gYuYRc{EcE zLlibh4p{TWm`Geu1`=rF*=8sPsy@cn#7`)&zCq#izMv!4U1zewfmFw|L1v7y3{{#lEx^&#W5)0Z=rx* zC-f~QvhHqYuxrtChQtRee}ZO4&J_!=Q$fu?3&=K$d>;V3gql~eidb-u)+xNF=V_5b 
z4na4E&k=r6{AIZ7g}*F!z3>OGWP8u1tqMQ}AcZPI<1X%_!T}pne2OJHupysD<+7Ba zji>cRDZ}5o0bgx5qe=&1VL~VORcTaScVOFoeXR(6y*Ta_auvGs7Nku3cC9=CDGHFq#SzBSOzCqyO#9c3iw99s#l!R!XBE7Z2VfDiytaq7^3^=__UEEp zoR2eCVJUph9z2Co#gx|e#(Uzuc>kVwkNE~w5>3B{-uP{Wcux`E1=~5T7ww;xL=jf> zUylnQqpA7!uMB^z@U_4FhZovd68w-^l;2k`v|st=>dY5jzI~yM259yEg|^(k_N@!; z4}9u7AN|dv|9u4c&V}|*q3JhQ|9I5T@#l=%jrBE>=Y8oONv)`B1kxL(A7Mh4K4F|w z4~HLe*la;XG^+WV1i8V@VQpVxw-Z~{-DacBMAiMU(_U_d^fj;J?b$Hr5Gf|g`G7;zkJ);&F-GhYmab;o4I>0$l z**kdKi|ChjDzFQ?5m7$QuG3D1#|C>9pc-xoowuYfcw*ANj-sCet|Imz3b28T@alWe z2@B$-`Qo)MN*U1);mCXd-?`0{Z~)+}ypBs$fi}MGlclxv`+m;3Mvo!wcvxwoi#UmM z`Y{noJC~?%o?iCWV0rlL2f0e@*N1;>^-;oBU=Lvnw9{+qP6T_P8ENdVhNsWdF71WS zA~Sj#s43Rkedxg8r&w^b_mX@7M+J=22xzj}8R|8*;o{2Zgw55;bzy`-*l3P(3-#=9 z09zTLG_z1I8mDH1TytDtD}9X}Qu@|?mhn(olq$AF9}9`yP35vwFYKI(!$07QPjGJb zqHoJS?i_))^pHJc9|fLbgYj?hy`M(XqHApQqq~u2iLh$pMj2=(AXKZ6(@9BcOEV< zX=`t5>uzhC)`3Yb;0p>Mf(-6(wJ7&0Ot1siCbq;(3GWB_#Fa%l)iTW&;BykK`w|#Q z6~^kI)iJJ*u^s{g|2jF5sP{0?HGBru@G5rsqd`Aa`iH~bErb&+53zsWtA@*iL9_%` zXvFlZ5$vXGEAaP+)w^~!X0_Hc zpSoXYstjSp+|34p)jsNAX=NE+BD0G-Hgh<{h9LaC9^dg1_Z$E_`T?UDcXRJyY|YZ2 zcPyCD@t5?2n^9xjMRg8F5I&yKA{}_mmYNYfsl6VMgUA9WE1X*Jv$38}R{KzAdsP)g zFsM;wIvRJ*DSk0nY+`%lT=lUr*zk?sa0!-kYHL;zfq)nonC9&MhJ-zgF>(@Ls#}cG zAPaa^&+cyJ_ zOH-#6iJoLYd5@pfSpwcfg8lQ1j3{w2-lV_@HY_K}NuJOtlN*0ZQrS+=cuZt8S38-x z8h85GcS;V^ihIn_zlv6uZtp>Q`z<6b>k! zg@I)kSNKQhl%7Ofxi)r0a21U98v9pmjqok10Bgnmmt_~$?#d17MS%P(?yhA3> z9T(&mX|Z|SXW9PDxVL5Djs(^rEX7h>fU=T4LHp4> zv1!}Z=j>}GJcYekwEJ7wnWDa)I}CNqhB^n>uSLbTq0}Mw_0OQS{`#k_MOzhB=^a>~ zv<}wt0x1imAQ0G*uuKc0`c%>wi-)o9x4(}NM@>C*6j8^$7NO}Rx6m=n@X~v3;z^W> zw>(fmQpP50yWaqt?gK%0SqfA_=`#=mOe}>Hv=qAV8o){1e=h@E}nfxOr zzsZ(<6XBOIqnA)1m*w*Iz<-zV?Ma>+#}cBMl+%ekD>eEb^Vvdwu$ac@B^B$tx62VH3@B=9vDO=g)v$&f16EBjU{_4#?m4{4D+2)Amt^ zJ~7|+=CB2qb^g>Zm{k8B`rXDyX@rgW=s!RKzAO3$UL>Yp&4YLP;R&HgTJX8p6N-d4 z3|wXw9KzXpF@kJOtP6Zk*s1RO2DX-yzH?f~xxZrdpNW0_ z)@NWyJ#C?M@-ytu?RwozjCLEo%58$`aL4|(i8xoKcRjYnGw>F`94ederF-KvZEKc7vXzlDVK3jx^dHAc9yg9_f? 
zNdl$BrUhu2nu z0O5bzDM`kTD3+25-CJz75YMfD9}gyZVtGSUoZn?f|B}g$e67UI=%@U)9SdP6xt9YA@o6#NHK@JvEMHG*JMa zgP9`hvoJFzY)Ahal(t1)3I7LBX5)V$gD@3%J`M3?}p;pIh$AKf3M#8F8i zg%OV%yV#YDVrWi`M~OXni{Tzo*wdm0q!7ysc?_DDIN>~89iU=H_?oz3P%diaNyPjJ zeEXA(JC+DzqXS6aEYoM~P@>B?idY-yElQ$W79GUOm7)w{U7+`%Z}L{{<%=01v~R?+_q zH^kTW8QV_*tchfLnMt2XkI7R=AY4eHXrhw*(m0zn=`vR z)FLD!)Z&}o%3Sz4ya0}0=LE>aL+k`l08taXPcx(yd9r$ld@y_cAJKLjk!*AMmM{e= zS^y~+f+%PK)pUdinA!;>k>wyN02%Ir24)lgSGXJu#$=!u)wReK2o`oX3QV+vcT13= z2tQsPZ8IqvmHHpq(PJU)k?6=)+mLFa$9#&IC8ARLcA}S27M+-3eGFeJ@w)_F{0Vf? zU&Y+%uQ4IIAZF|?NKz{cR_K3=7D+j^)f=+BNgg}dCVMv=saIG%flU97Z%UZ_dnR9J zA{elR1`^+#DsX~9M6nu?;uua9^it< z-<^Uh0jq>K&j0aq`dg^qMju?`u@LzL3NRHByu$ro2J1nod7g1az?)1P-d7kcz`=tS z;BykR=S!Gl24RlrBtiwk<^xU`VS+^YnY%X45ajpldGH+(6f<~D;v;Gw2YQ5&BdTh+ zl4RiJAKnuAjBwBj1Ke<$C{;?g=4Pyp3QtPPMzz4SP(d(ir*BMceLr5ba{{NRW}0W< zV^HB`&Kre?B{5hfoc-AkG}7MzWSas7-m$e?ejbGw$U`#-%He+c+o>|WK7wWdJta`k z+UpiJIDtg_F{cOvWn`8&j}tX3II8eR=;^yh9ts(P*B`(ky)uOJnLbCv*vU~fh*LMx zQBt`g$J{=Y#`Gori!Xir)fZk8UB*Pzdau0p`jsSq1+lj#6z&V?eCGt*C!IFTS+~Zb z9v~D>M@>$H$XMzkyd3>M@$|i?pgW!deCjl)gr6x|24W&l%H(*cImz9!bXqtm=TJr% zfzQ$bhhd~ja{h!g4fpa`oH3qc(53sF}hr_ z(LnqGPyIC-h!h(Pdis~y#2oTC^OT7k{2u_xhS<(YnMo&wL4o5rS>l}R$0HVI#D|69 z%bj^#j{XzWv{*g)8);}Eb?m_6S<^anF`P9uc0Bayck(!KYR;2p((|PLr?DkhXRLc6 znYwcM%7+JPMI>fFh*p0`9H3_PFJGzN(6sY(714IpiIat=tFQIBc>xdh5MiM3^R=;S z#x0Jerg3y5ZZviCUDT5?er$fO`Vl?^&nG)L9rWAqoYF~(1n&WcAjoeK-b9?U*>Zpy zBsy2Uf@g@$aG3K1CA)!Ro$r&OOESMpT_d`=xWZotZKXU%LaK3u3-p8?Di}T39ChKw zL5SSC^Vw72P3{KNs7?E``x%dd0A6n+&)u!qY7~CLdeZu+b=mrawUvXs$8jeZZhhu9 zir`dMqokZRpD5D^YE%?cxNne7M|)`UAO^;d_O27M4`I7mT)Ak5J#_CAuK0%#1MF{q z%rIj}=#2wXo@E)3qLdD@1gFkP^*= zCj7P-9qoSLDqTfSp?$Uww^_HcQH=}auJTlABkdK6;sYDU(EdTV$ck`ff%j}2$EjI5 z*04Qnf{&wz;_}9cP4aghHrN*zpzMDk+AB^cd~tW*#>1Q7@Z|sa{y9Pz?%1uV91I1k zaZ(7fJfAok=U+sTgW>-?1A_Z)91-+*>eqy_QM@FcYDP_Zd-WypOI?|WjC%kLw(%Gu zgdhbGgYpb}(=+hi@enXXOc#-qAGcsrI~;`mHX6VxxxrUx6=)%DV?a%nAoQtJz0AMXS*PlwEj~(p5#VN4L|ZfG-$VR 
zF3fv%{?coSH-I-L$zERRM>vt398duSUPq2JG)OEBLu84E&^?v-X=%2FA}rqEX3!F6 z35+H9#coFQv6~DWWmL&I%VAP74wtkvWvXyMCc>d&3}rg49z!%IS7QP&`USIOp@`)~ z96KXWGEF&|mZbB<@52v;qtbgOYvizIGCgXDAP^Y=4y^Ie_x85tr1hR~qT>|A9^sTC zyJ-l>Y%Tn64t2|3FNm-*TEahN6@4{gyCgZH7>|v?-G$AKuwEXG2nVutD5`8y*z#%B zD)4~@y+s}k#-e-7plV=u9e-9OPZ*h3$sU)jzVa7Wx6DaU!eDyIVg5pkR z8E%ezqalIPjfTNJ@$*4vvVNKfcMh#_f(U;+T1**BJD-0A*tPU*O#=pJKcX@{yNbMz zvwPEs5~$)@oxK;DZX+K?$y3378CUpmyc++afR1-LDK2+}qa46KJ;;f|nw;@5<`|F# zK4BxoSlU9vdl6*E=sz4t0?+kOn_=ugjLm^=9+nOd5Fx)&z(E)CRp^N4`KHna4>ldad8O}5P`Ow-L=EUe@lB)mzh+MP!|!h)SqYT zJVD!N3Dg+e)ce_9kg7m}@3dDCv!D+XT4XZIgsO+0VeSAEYHCS|R5UvAVYEV0NiuC% z2_jm;Tm4q!`dR(Q1Xsq^TLg}?oD_v8Ve1YB&5)}E{;_n#a7Z5|1WG$NPpGH)(rfG` zbq@%~r$%Uu#(#xeeRLvrOB%>5ZTWt;481IL44mLoo1_zZ6f7Le0)=4fs|0$Ov8P=| zDQ`u5@mwp8P*{aK9!@18+7YdC&gy}kG?MnajYKeZ7)PlMIExGxA1EKrATGomgZ&LV^HmpAPk+cZu!>QRRY^H-gcau}D2D$h<{4`MxLH4WFodez3yync$ zBL*A-hSLX~w$Nz6pHHQ*k(3&ZYdCk4)?^zEHE4n7kkjZhOrAiJlv8fh3VOW}N0KRj zh)skyQHZT@V8l1@pj_p_+;I_Y_$;)dY@OrH(Q=%}%U9b zugOD~F4Z29^O^dW2z;N(Nhah_KscNxGUpXS5lAAMlAQT(7%&Z~y%`$(B8rJ`)ODG-2}Ib!@7#H_cF%H7Wix}Jiu{|l zbJ)ztm2;J&)tsNpcwa3Z%icYkgscc5?}HwA@G|3J<&s>k_|sm=;*Pvt{>V}2wJnM!Wk|8I0!S)SCbow?Q#G^A z?dfh+*KoN^&m|NA1Bn2goE!+4bWQ<$3IZQ>bASYPE;$5gz_0-ygbfTNx#bUJzE?H> z7D>yN6(ex(_M7^7{p!7{S6@}VdShm$Y~ZQix!m~k=MCehlo@>-WM0CX#30OIW?)qH zui3V$Ia8RqDNHuN-w>F@+0gi&=qzR^eJ`>^e8(HdV;y2 zpJFEzrSu853hJ__Rml-WA)$4ExKO^zo`$p)Y6+5#vSaKx^f(NS4z(88Gk0^)a1Po| zL*IG!Y4$8T$<|;&3D!8Ur_3sM=OKaQKdsu*t!qtIl|HMCbaoY8QJSMAl%O@xB~^mf zRhn#HR`x;%qz{r^t$B>tlXZFS?%c0M6RpGC<9e($tMvQK-CWm54S2`;R@Dxpgjb6J zZ^|0qEA+&m5Xl+EnQTX$fN@cs)$D_OSqlp4(Qb9BCrV{>#jfxZ*{{$U z-O9Ef_ghg(C1xk&yjs>$bXpaA;ZDH&LR6-*Ajqlr6|pFlBVrTD6QN1CUm#K>QX)dr zc7KYQE=(Z(1sLfBcVlRdfTU?%M`$^*zI;rCY z{w+`XQMeKBWm8&ijrk2pW0Cv>#IP;%34i};FFVG>pZOvA|FlZa{M7!`6Quo{eQ@Gi zl07Q#e=NuPz&`d^Isa}iK32*D%Rc@u_V=Bu;wjje<)v3Yds_MD2hXeLAI~?)+4>Ll z2aURd{8>zsTQ5rEo{86zxqEhM-8Zl2@0-kAGVYuA9BEuHAZIb_4qjRu?&O-*5#wI* 
zzR}IUX@phVkfoin_zTmZze!ohmuWdkbN6$%%}2;}CguL25n68N;{ZHE)}jjIaOR*Q{=#TTF|raKuRM9b3FF=kCp?hFm~dp1x5jn^H|H zb>w22PYd!un!jxxGL+2R{e0JW`+Wt3B`?HWD!X_t32!%Xuj%=D&5c6$n%{;aUfU?E z`U|3!Y^oG-tEx@?aGQ~6dvd^~g>AY4(9eyUuFhj7yLIj|G#G9ScuDG& zZ7=4V?phc%Ynpf6eZDfr@)gy>rpvv?wwC8=5Oy0;C_SjMwzuxyT&HZ)XV8id1vH`{ z61CU^@<_B3dTmZ>c`lTWgGdH`9p8b+?eUB?eTg@{P9QhklgjN*4k~-hwZx+-n?P`< zUFU*CGr!s7f`?MeO+WOTO`h@d$y1o0D)-k0vK9|XhEa4&md3qA8kr`O#xUmvtNrpxJ#=e0*% z=CUKg5vRD|hJ3f@6H1Bb1{ZVz!IdqQ}7SuPG_R%~%uYqY&Mb5zGAnFI9Pi+PssU}ZU%IWi)5%VsY~Dg1gz@|T5( zL{<#^Fp0ee&vI?=CWw!j71$iBkmS+!6%Xx;8Vv?vbH z@%Yc9W9}oMW8n$t2w4Mkv_Aqm7W(LzS_%Pg00E2LQfj89BL>pt?o?V1LL1{0$Yu36XJvqJNETD(B%-z9Q@$T=dP2g#;K z2S{vExoYYQRGC-GV+E6Eh@727`Wc1v{mJ%Ce3Fm^W91jYdP4eTtO1sB$T(y!8rB0R zFD~Kh25o=)52fk7h&LfyCCe z($LqRVR|C=HUBLN(u^D5$o>YQpx(9AJ}xkv+y<-jZGU_H$?S&vj`y+IG!F0s@e>=n zU&9vvw@5!YEsP?4#U5g;cnPqjptX`4DJ7(=SPJpfcDGWRKPaWvTT=d=EM&iy+2{GJqRFkq1r=sfUgwUO{O#4&JoZ z48Z$uab!=8d&Yf4G7y(2r6mNiO^R5kSOuaMh-DzGecv>VL0!IA=cqbFIY#P=y}FXB zD^s1c5sAn%6@@_D;k(^37K-u_gH0_N*m376-JOFW z@r&L+^tTlk1;+tAaXq&s5HY~kR_$dIL?{r>Xh~P2g4%ENcvMX+I7L1=>oMqv(p8Fb z;NNrWUg9_0W+!Y=z?33XF5~r1tL29+H|_|E&n7B<(hGFG+H{jnyX}d+8Y1-)aU94A zBk68MAzxP;Zlz&Aazad!)8PYko^-G1kKfeaDjijzY4wkxYSX{jt!cwn$rQDt8rJlT zfuhOkqvLLgyM`cfV|(K&50gk#Hen_-KytF%RD8o-t9ugBvYT!L0iciv36k5K?mLj! 
z^_kqR4C8Rp69}<9wgn6ZBJvVJnBM6863wX5to>q*^+sNG=D5BYigE@@ulbG3Ln1$;L{0M{+es z2D2g=jsmv9!vH4Dox$-lkvw+S5L|=MiFT>#5h6|xzSr}}T}veS{)7ZI*EGi?1g)+< zGJY9*{@5Ja_AzfLxKQu-f$RlsY3fn;;`*f{{UF(Bdpo>Fn4mP=QYX^Zu`2t*k6-`c z`BwR#{xbej`N!wg%~)%1|JgIe@BZL?>+f#7|Lv7iKRT}ttgRpZKt1PM-@5px!3Y2S z&*xjTH!zI(mwJhxoc{`4FNL^L$_g>AiU40(DWqsPE`xaHXi%x(a=aBpbuYk05|67> zBZyL&$sEQd0_R!25nw=YPt^%(nGI<*#9ArzD^8)&EO*O~)zLSz0f6mnO2Lh`8F5xJ zIx7vVu9irEYQJ(o{5}bA^u(@CYtPlW%+ZsiU)syfPkMKCc6>-Pn@mB>Rl#Sqq%EyQ zJA%&E;s{8!tb`;r=qN5wL1`d0Gri2LR|_;iwU(@wRD$|tacsF4!L>TeO}?4q@37R`Bc-g?eFYp$AQb164jU+&evpzBNG8<5gKcy;jPkfOWOi+Gc} zAas$G;tHy3GNWtVlF~}CrNed$TPBvns{)iPzhjBd$=r3jYlW{pg2w^RTj0rqXYCO@ z1@NTcalm8C!k?H3-Y(&4tB9+uZ|SQo9d=D=kuGj!1xz*WwG-vqnJv8*;_f1nSh4r3 z(0~7LKzzDjLyX*MHrp|8Rr9fka1EkDC@Q3K;PUHNFI;`O_W9Gbi?6+Y@vCpXdgbM; z+$%hD?sT>?!tnYVTUTGcJi_+}UeM{s^1sUWR8${RqS*8in#WB}wbX`lt836~M(Ktb zigO`umj_p_I(#FcU}bsSQ^b2J zwuO5M!XfbnwaSRlji@^R=#vngB9!MTtw*{*bp{!l6~EYySSR2wl5Zw!AVqR<{g*4g VRX&M3&WGghDvYh|` literal 0 HcmV?d00001 diff --git a/misc/utils.py b/misc/utils.py index 2620b7b..83d871b 100644 --- a/misc/utils.py +++ b/misc/utils.py @@ -627,10 +627,15 @@ def torch_dtype_from_trt(dtype): else: raise TypeError("%s is not supported by torch" % dtype) class TRTModule_hrnet(torch.nn.Module): - def __init__(self, engine=None, input_names=None, output_names=None, input_flattener=None, output_flattener=None,path=None,device=None): + """ + TensorRT wrapper for HigherHRNet. + Args: + path: Path to the .engine file for trt inference. 
+ device: The cuda device to be used + + """ + def __init__(self,path=None,device=None): super(TRTModule_hrnet, self).__init__() - # self._register_state_dict_hook(TRTModule._on_state_dict) - # self.engine = engine logger = trt.Logger(trt.Logger.INFO) with open(path, 'rb') as f, trt.Runtime(logger) as runtime: self.engine = runtime.deserialize_cuda_engine(f.read()) @@ -638,15 +643,11 @@ def __init__(self, engine=None, input_names=None, output_names=None, input_flatt self.context = self.engine.create_execution_context() self.input_names = ['images'] self.output_names = [] - self.input_flattener = input_flattener - self.output_flattener = output_flattener + self.input_flattener = None + self.output_flattener = None Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - # with open(path, 'rb') as f, trt.Runtime(logger) as runtime: - # self.model = runtime.deserialize_cuda_engine(f.read()) - # self.context = self.model.create_execution_context() self.bindings = OrderedDict() - # self.output_names = [] fp16 = False # default updated below dynamic = False for i in range(self.engine.num_bindings): @@ -658,7 +659,7 @@ def __init__(self, engine=None, input_names=None, output_names=None, input_flatt self.context.set_binding_shape(i, tuple(self.engine.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True - else: # output + else: self.output_names.append(name) shape = tuple(self.context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) diff --git a/scripts/export_model.py b/scripts/export_model.py new file mode 100644 index 0000000..7288071 --- /dev/null +++ b/scripts/export_model.py @@ -0,0 +1,260 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Export HigherHRNet in TensorRT inference engine format. Modified from yolov5 export.py function. 
+Usage: + $ python export.py --weights pose_higher_hrnet_w32_512.pth --include engine + +""" + +import argparse +import json +import os +import platform +import re +import subprocess +import sys +import time +import warnings +from pathlib import Path + +import pandas as pd +import torch +from torch.utils.mobile_optimizer import optimize_for_mobile +sys.path.insert(1, os.getcwd()) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from utils.yolov5.dataloaders import LoadImages +from utils.yolov5.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, + check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) +from utils.yolov5.torch_utils import select_device, smart_inference_mode +from models.higherhrnet import HigherHRNet + +MACOS = platform.system() == 'Darwin' # macOS environment + + +def export_formats(): + # YOLOv5 export formats + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TensorRT', 'engine', '.engine', False, True]] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def try_export(inner_func): + # YOLOv5 export decorator, i..e @try_export + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + + +@try_export +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export + check_requirements('onnx') + import onnx + + 
LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + + output_names = ['output0', 'output1'] #if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + + torch.onnx.export( + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, + f, + verbose=False, + opset_version=opset, + do_constant_folding=True, + input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic or None) + + + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + onnx.save(model_onnx, f) + + # Simplify + if simplify: + try: + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + return f, model_onnx + +@try_export +def export_engine(model, im, file, half, dynamic, simplify, workspace=2, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' + try: + import tensorrt as trt + except Exception: + if platform.system() == 'Linux': + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + export_onnx(model, im, file, 12, dynamic, simplify) + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + for inp in inputs: + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') + + if dynamic: + if im.shape[0] <= 1: + LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, 
*im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') + if builder.platform_has_fast_fp16 : + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + return f, None + +@smart_inference_mode() +def run( + weights=ROOT / 'pose_higher_hrnet_w32_512', # weights path + imgsz=(512, 960), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx'), # include formats + half=False, # FP16 half-precision export + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF/TensorRT: dynamic axes + simplify=False, # ONNX: simplify model + opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=2, # TensorRT: workspace size (GB) + +): + t = time.time() + include = [x.lower() for x in include] # to lowercase + fmts = tuple(export_formats()['Argument'][1:]) # --include arguments + flags = [x in include for x in fmts] + assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' + engine = flags # export booleans + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights + + # Load PyTorch model + device = select_device(device) + if half: + assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' + assert not dynamic, '--half not compatible with --dynamic, i.e. 
use either --half or --dynamic but not both' + # model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model + model = HigherHRNet(32,17) + + model.load_state_dict(torch.load(weights)) + model.cuda() + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + im = torch.zeros(batch_size, 3, imgsz[0],imgsz[1]).to(device) # image size(1,3,320,192) BCHW iDetection + + # Update model + model.eval() + for _ in range(2): + y = model(im) # dry runs + + # Exports + f = [''] * len(fmts) # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + if engine: # TensorRT required before ONNX + f[0], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) + + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)') + + return f # return list of exported files/dirs + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / '../pose_higher_hrnet_w32_512.pth', help='model.pth path(s)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=list, default=[512, 960], help='image (h, w)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=1, help='TensorRT: workspace size (GB)') + parser.add_argument( + '--include', + nargs='+', + default=['engine'], + help='Export type to be included. Works on ') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/scripts/live-demo.py b/scripts/live-demo.py index fa6a4fe..4054331 100644 --- a/scripts/live-demo.py +++ b/scripts/live-demo.py @@ -15,7 +15,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, image_resolution, disable_tracking, - max_nof_people, max_batch_size, disable_vidgear, save_video, video_format, video_framerate, device, extract_pts,trt_): + max_nof_people, max_batch_size, disable_vidgear, save_video, video_format, video_framerate, device, extract_pts,enable_tensorrt): if device is not None: device = torch.device(device) else: @@ -51,7 +51,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, max_nof_people=max_nof_people, max_batch_size=max_batch_size, device=device, - trt_=trt_ + enable_tensorrt=enable_tensorrt ) if not disable_tracking: @@ -114,7 +114,7 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, pts_dict[frame_count] = pt[:, :2] fps = 
1. / (time.time() - t) - print('\rframerate: %f fps / detected people: %d' % (fps, len(pts)), end='') + print('\rframerate: %f fps / detected people: %d ' % (fps, len(pts)), end='') if has_display: cv2.imshow('frame.png', frame) @@ -173,7 +173,8 @@ def main(camera_id, filename, hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, "set to `cuda:IDS` to use one or more specific GPUs " "(e.g. `cuda:0` `cuda:1,2`); " "set to `cpu` to run on cpu.", type=str, default=None) - parser.add_argument("--trt_",action='store_true') + parser.add_argument("--enable_tensorrt",help="Enables tensorrt inference for HigherHRnet." + "It should be used only after the HigherHRNet engine file has been generated",action='store_true') parser.add_argument("--extract_pts", help="save output keypoints in numpy format", action="store_true") args = parser.parse_args() main(**args.__dict__) diff --git a/utils/__init__.py b/utils/__init__.py index 8403a61..e69de29 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,71 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -utils/initialization -""" - -import contextlib -import platform -import threading - - -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): - self.msg = msg - - def __enter__(self): - pass - - def __exit__(self, exc_type, value, traceback): - if value: - print(emojis(f'{self.msg}{value}')) - return True - - -def threaded(func): - # Multi-threads a target function and returns thread. 
Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def notebook_init(verbose=True): - # Check system software and hardware - print('Checking setup...') - - import os - import shutil - - from utils.general import check_font, check_requirements, is_colab - from utils.torch_utils import select_device # imports - - check_requirements(('psutil', 'IPython')) - check_font() - - import psutil - from IPython import display # to display images and clear console output - - if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - - # System info - if verbose: - gb = 1 << 30 # bytes to GiB (1024 ** 3) - ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") - display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' - else: - s = '' - - select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) - return display diff --git a/utils/__pycache__/augmentations.cpython-38.pyc b/utils/__pycache__/augmentations.cpython-38.pyc deleted file mode 100644 index b55a9f68b3f58e1e3cd99b6a2196166b8e364676..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13747 zcma)DdvIJ=dB3l{yLCR*%-xff;Sl1R>*@yvHW(N+H#N;1KT8<)tjbx*0(8sDqG(}aav22`6^h%W|}x;l^@;CiDnSlyM~g>)!8B+TsaIYop;3gxA@x=K@GW z0r4Pa`XFX{FLDowhmm_2xwj$rF7XI*k05tYXqT1Dsnvbf`D(sokL8=Cs$FyQZn<6? 
zD>iEdN;uXgoyc^?^vw&G&Ye3m<0lI9c40Ah&6W-t_>n@rQkRY^{eEdX`D&wL=Ny}d za{O33H8EFT_O0cWrTN;#^2+i`M)f0eD9e@W`HWt&{^|D`k6GWIDt+VHSAKW&@V`uz ze*4s~S6=(ySEf+;maKU6+n2sIRa)7+_h{q`-<&Fa>zn+WD$vw=e>yV9@i=1uwgN;^ zfFu#22_0Lbi>L?*6DdQ4v2@E%o~X<rKKvZkgBW>xjf1XiWho8`HU9y zd;m1q3Vk5-Uh1R07ddR=uIV>Y`E9%S$e6@NDQ|?1jvpDnfA*H@8}oT7GFs_VpQPXB z)N825@B33I3(oR;Q>DN9(ZQ!b{Iwr+DitLY^38d>T$*?N&{A2r^M1%FG7E5gaB*%%{dBQNt+=MC-9Sbvtw z#Vr?{94+$N1Gy@i+VXJIEmxewy>#u2H&*=UWXZ1C%ME$#K3W0cxS?o@sjS|&ogT(> zxpJ-S=5phcl{&WGIX2$yni=$?_-@Ui9h3&f@RQr|KoT|jv0R5!+5i_~#uTPZ-LbRQNpPAc`79-!oo|$D#ky}S)XOzLb{>tSdq&E- z^rL5%3wDF}9{8Q%*p(t6rs|s*wOC&E)f2ut>#H`OOaKnhh>^}tjUeY71W;5{ji?%) zl%7_tp1=4{jObo%znWAHHLb1=zsa7{iyafNf|>3yhA& zERwj0PHz$^FokDyKU5~s;hRJ*wj*~_)w=+;(F@@Lt>ZEtXB41B6q9~h3q~$t?2l_7 z(XJYo1B~Z=&FJzjH1*Yjug>kAyX%>>$3wjpr0XE57G z&@~g1_tLja;4{$E*J=$vl$V5r`f9m>>Z4Sxm8-rPu#SLa^_so3b85hZ;FCwt#i8R; zaLR}9cZZV>T25!cc+n0oI&<(n3{uNiZBUGr%T?>537MMn3@^+W3@4V^>*FJ;omoD%A~~M@GZs{BXI(tO*QLn?37C zYs54wpo@$liU0XV)y~&^-EoCHkNnQb5L>3{Iz)YnCMjPatmyn`CWPIL*~gG@!9Um3 z)5zYIp=8K9JSI1TPZsglS>(!|O&9?JPBGIME*OBp5C+=x@bopJ{&vjN( zg}$K^4S9M=A5?@kth5YQU#BPJDK1ol4G7t%^;W25w!)y|a4Uk|5mF~S1L}eaij8{F zfMzj5ct}}~dXWvaWx2|_#WkRF)PQ<7^mbjWTNk~pE{<=<@Wj24XKiSlGQC(k753sA zT2aUN3Cu3!5lmpOEmCe+@W-Y%e~O=c7>kzE1c)(gLgXnB`61!1H0;e6spzK!=;(1C zx^Ub~QcfW!>7e~^vsNY%;_HQL6Oix?2yW<<*W6R2R{Dukmo8kI$(=kmGx7N4EAk3; zF~Q1lsCYZ{5#K0O${^UPEm5dncxtxO3q2;#6W=>Y{X+AlJUD*H30~LurAl9)Z=#=* zcYXa@zQW&Nw9bYrO*e^34G0w7t;{nnAd;So7%G6hu7Y6hUb0bSEmWfp0JtQ@d%jaWI*NN+QJ zCKh27K^_|*lAWcKN^Kk?o441zS$m#`j(PkJz8dCvL^enmgr-ZFCjE|Qv`r!dSp$Fu z7NsM(P_CAtT0sf1&&jg542BWNji`K@+CB@=IVw8f_dRu`lbtztlC!abwDYdbngCcb zsZM^}X7z5>ejGBO(*b?ysC%;*1Du;qJ{yACH*aV4xsuH4E6r>2QTqPfT|g|;Pt0Sy zTxW0=R&kUo+6V-7Qaz%95nMxln}INlf-e2v3*09Oj4fB62Q@;%@o}|}08%-S#_OsJ zYNumc(4RH}nr}tCuooIpyohIxfUgO4k3tl`ZfsyYH{?ZOK7e(c@ z0kqiV4V6MLPuM7GsaNMoy09K$PRsXU>jLgELn-1OU=T)rx$IpZ`OpIpq|7>L{7ds@2g9Gh_w`7TR!S* zD|nVC@SgBhM?Oi7^yQ;-zP>_Wd4f^^+T46rn_EJ=0@@d-O`h5SEKg8sBEJ)lqNK=| 
z3qV^!aECNP1(ZJu92x{4NI@OA4_x4YdaovLq7DWCNq+{n^i5LiXu%V48wa;6%a%_uvEsSOao^d!Xk5^Fq#(Xazjs zT*Ql@Ulf>P1g*RX;T_`+mC#u2612mq^LQ4O1N(w+SyaYY2;77${F)mA%ZYiAbDWA( z3Ydr5+A+_7=?Pa0=!IGb2X7nx_{N>RT|97w2~zGb9Q$pQlw?i#o=B z&|DqaqAh3cVMrgGoKIwKC*}Ifr?Hnz#iF@SYaPbz2a1g@N-#Z%MDuqg)Jn zcJxNL1ug)Ij2RUnL%}GmHntOv2Pn^q9ymJ{Fd%*dxtRc}+pDG){o zVy9_m;&K@+eXH!0Yp@4v1zWCAfnF`w)mN=&kZe@UQ$H8KGNE}1ZYBgo;Y>qi@@0R2#;f89HLB62QzJxmL0OU<71uDW5#}i8_kVe5?t3;f{q9_J+|?YWq+gr}AJ2h%5Bgy9tk+c>~_S zO+yOTf}=GJw;9{NC?CYvlHPzAD3Z+H27Izq}>?7 z9l&T<&zersc2c>iT|ID9tvyRKp;?nf(4Gl-rDz^H)J6l&K^>1F;7}xay*}7(t>N4{ zU%&H=bve1S#m_%u{>~uqbEG>4sj&lMSBdq8*6waOIa7zkaJzgIE86AlULS%jrFx_7 z_wM!KcAnow(BAr>z*%l+3u&CzuJ)SojS~Al)<*<%6s&Pa4Nj#~g75aUjrO!{*8rb) zLk@Lz6M7Ks;6mE#15WS4dk~m3DMo44%pI` zazv(8rp>IV6lpfO+G%b`Obq2)o&Jyt;GTh<1c#|vfL|Yu&;gXn_XKUbqNp<-7qBMw zQEq|T6mosR70MFg3Q3=BZXL4dbUY3{FpVP2$3vtc9ga2`ptZ+6rHpyt(BNQdwYk1_Rs7kNqm*pI#l2{C!OEpmzsC6=X0udc{p%mM$<43yJE@5Gp zq-{HG7cG-^da?>uRiIkGrpRa*SX+wpnj_teYKX2 zH=V639(kI&JxQ&??e9ZTUJ3j{>dKDe_S3f??oIS{o&!+gL+X%jK}#M4Osi?Yq&BFH z;0#txTJ<3Gf|L3P+^v&HJ)w`NBkDVKxZd<5XfdM6 zSMZGj;Z5g9ia=M^KMe|S!S}(}Az2|$W>_zsfh6yGX2@Ggc~@I7z{4UWW8JW4KsSzf zx@VA{49^*4$1^WOM^HWZU0{0wLZ`H#6_~J(0zH{EWX{723%%JAq1{T+XvN(qtgQ}* zFrddWcY$V}fD8{W!rQZ<$R{w97+YKL)xv`s|F~8H2Z8Sg8o#Hl$GILFcbiK=w-3g_ zmpWz87=kkN!)oY~$4x-iMS>Zx_hFT(4HfnquBP0?LjMM?hORz=R2uw>uBb@!NURS) z%b?wW{V5`gMyrqXVXRy6`U1`d-!W;yabg7OV&E*=;#FV5A;=d2O84{c@23J5NE$J` z!0)B%E`V>raAe^q!->qG{3=zm#>qaVzeLs#HK9d69Qc>a;7U?nqIRqwlerFlRH?|n zrZSHJlYLys{1V+R)SJ=?EYA=Ix_`nq&z`?<;mm12Hdl6+%D{dQp>Gu7UsM*-Mn1Z~<^7ksr6(3K8~hIL!dvzAU5IYdZ1X~rU5JxA9+_V-cE zS56A_+&KPHuqe~8B}b9ot3IM0fOZ^*1{~Mq>u9^p!9+Gk*H`f*V>_{GPg^7%>H^_4 zu%+XhK`Rb#)I;kv z+=an1?UWhavWRO4@=l+0v0N0~EHPCQj({*Yr_(BX=S7eF7UPMlZ{uAaL zU6LlQE3PS$q?N6a`^@s<@KeAVBV32%4wJSguXv{DTQuYqH?lx}Rg0{(l7XB>m#Of2 z!q?$$(V;H-16rYL*EJnqPJ;T9BE6`PT)%q7Q-R?FZ99*}C`swIf9!9k+KI1DNkaE_ z?u%0_z}mU>sZHWXjQqC=T9Oh(5bRFi`;!zb`|eF+aC-tL+;Ozo{0jhxb@GcqvXH>2 zxyau|c{R5Kz2svk9sxGAZ&4tDM+-3Vnr@HO-MTa(b)@vtzUTk*-7kH1NQWQzOx&A5 
ze3EF&nz-v+%+0soOTMuTL)bS~7%oG0QW_HU-)ZFkAaHjV96$=n5id4EJ1~X z1SAH@p;7kRB=!Az3ep6ZcAESl+H8{|B-}}=u*FFJMz~4I#sbX%?+xG}v^mM@{u*X@SmYfDG7+B|%QOYj-akx!1yf_tH^J#6me|@U-6#srS)sB#OWvW10N#{pgp{u1V;|ngf z8d?ehHdd1yfC7I5T?JIMs@_6?6Fgh7g%EjN>D~yzD2n_FN=#rH-$B6x296GWA}XIH z3*WO|Kwwi4=5h69+%aK`H9$6wqO_VcfxyS(a>eA+4`(qgFpJpgruv2S(yODrD#``4oog`gRPE}-)viu zl`{s5WUSDX66RTDWvo_r$DC$^!VzpChv`FX{2^8f$eJT-4A|Qp8|e$whVC;Zd97{} z8pN2`I@^!L&kjq)i$FoGIJD1~foW600z+nJC4BN0) zu9f5w& zO{+47p4*U*j;5<=k{$%R3Vz2m%^7te7d-|1M*EDurv5T+CvXdnJ8)P~<{J44!*0Zd z&!q$}516<4O~)OZvw=IVU9#mN#;}8=wHz9A zn+QqFWGwkfOeAoYCr>yIopQa#aX*0}&L;&62hltox|Swo6X!3UICp0H%xv!BnG@4+ zq<6BHXHNqUCurCmdpvN5$v%UzSjSSdQF#85E1>7LA88TvbM6J$ieLO7EIi`o*Q1DG zL_i?|LyZrD1hXORP}GlJU?=&>`m+28`kj@(1-MN^p>g&0;{@li22s0#%X(MCeG&9o zxtBO`QL7>UGzAhuZ^?yV2`V{U6$9c{1@Spw#3-2v{0L3B5P}$#?zZ+AJfA}|`FUz< zaO_O5N!`+0svkw149DWN_0a>=;~{_|Z4;{_Fyi;YxL-|o*Vhe5+ja*_te1EattK@rG5x_g)6%By=*ePmpR`@37MmgMn5d`7G}{(A)Z;( zNb1xK8i__hON!Ef&z}A6xUUt^?|3Y}!=)SCpadax4RGslo9vdh-#H6lioXIq=u@9uN5D_He5X`)7|_ zVETUyLj;z?$B}B^Xa&YWSIG}dbw(Ig*NkEl2KyE2Nw&KDG=a|q-H%fGBmhk5DqM8s z>QbQYc1MHCMk<^mf1jG(0$^(#HcmjeLs*dH<()xYg_cK37e#Ij!2r0AFKowmU}CUT zcqq1r?v^RfqMJNN;Bf+k(Hy1aC)x;KuxW>S7zQHF+aK>oO3=p*^onRnQ&P##$tBD# zwi1^hZqdsn!jd=}-!X5a9m1F{XCMyfDnB1**y0AJOw-W3(mU`1s409v%qDrQTgXIx zRyqq-K<}#qTcY&N#2!W$BKA{woOysd5j)75Omrr1Tt~rmK;#{y0s@C?ivqIXu%N3d z5SWQSPI2%8%rm${h!{15U1&GReWkbU2}as*;B#V&b_*fd*~3`Js2L7cPN*r#!j`)M ze6_lThCOJ|MZXqpNWcIoaCCI%rnK=ch#8c7X23yEP=eRUxwujX(c&{&bYY#nRt>;ogr)|qxO<3qF$Y#9P8EU>1Cy4t%N9n3iU=2$@ocfGTn zU@?ZI60B?ni`W7g#CgKoK%7l}6kzu3ENc~+NDr=kfoea|1)L?eDf#vZ{&S?EAMwiw zw(ck0*8dY!Qn+9y=19?HAFwu5yl4J7RR4E zzO|KRZ7zG)og0pK{;?%r!5Osw$pt}wPteyA956plA`21qfsLTkgosG^4u@}R*g=(T zH?G>aA1XM3=PHRQN|=;E{4K}1a_lHa`*H9XNBjgq39rzguM_wofgcn2IssCrSwCd< zA24n@rJk*45sD_@rD-}wHt&|mu^jrI} g@x6aPv7WO=tUZy471a>6zT@w|t^2HSav+)bKR(??y#N3J diff --git a/utils/__pycache__/autoanchor.cpython-38.pyc b/utils/__pycache__/autoanchor.cpython-38.pyc deleted file mode 100644 index 
fd0bfb59c7cf51f9f193f14f2f21706813725745..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6508 zcmbVQO>i8?b?)w&o&CWASo{(0$*juBC)>xIol`EUIXVX$RjzX7kc$pUzSpx0a4qYS z1-7@Rr+?n-e(!zX>+g+@<`_O#C-0QmDaQVj9$x%dc({RY@FNtGtue`^sPi?hf5n=i zf6X;h|EAVb_%-TQ!&71MrJLe>#2ISkz30(M%G3+b4R4L%h&QUEi;eU+Nhgx z^ItNVeI|CfoR)bx`iSHGn4FQ5a_SLZ8<*lER-A25=5Dq^|E5>o@Ri*PYxN*^0NL0| zk;i6uUpDZYy{mR^KPNG|f>5>dM>8zjWlp2}9}~ayy@hYk zMiH@1)`PP9yeD)Ch4*=fM`)X8D8)QG#ySH1umxzL-=ximgxZcoFSXAigUT&KZHHDb z9cG}Y@l5PsMAUke~-l#lu*@NIrv|E6v@|~U3Irz71X?{?If!EO1i3;i_^(- z5SvZq%h+tVju)HTHTOYmLD`ygV>75#JwpawVXYjLcCfYE zZZ&DuDoYNIDUoflxf4%faCxhgG?cXPQ&=U)qhR6~FYsyn6}Xzkb31)|b@jWe z%l2>SEn|fn2Y)9RE0YR?&p0X>d<}eqUtyC%Iu5=o)NI5z`KF*_@G_|DTE~nGY_Ty- zTW;Wx3~9>Lo;blesW7!^ZKkCq(@|iV*zGO>B!t1?-fu^Y)+!bloVLe*f%;EYU@qIC^FNm zla10*wl}lSbdRxpSpPlI$#q8LcsTndi$-MOOH)onIXQ{nsb^LvkNWhM`2|;IIM+KC zk=M*5#^n9{uw}MyUbNQyK9gg|S!Xmn9ATJkQTD{PA1P(4vaQD6`8wa(x%BaC!)+LCRk~Lw2&vBiB8_j z>H0~kCvuVlXj4D65?c*lw(72;fO9}5fA=|$Gh43Pl(k0i+kg8teSUwv`jda6&wpLl zZb2>|)Y|Tl50gRAEFa+;oJUc`k$-AqTRHqxB;r!IdMqk1k%&*Te-QW0kJxi#<$wks zq)=!dcy7dE@lwo#nAdefQq16eoV!x51!385G_Spi=YVX}!e9I47cD(lyi)hePCdAW z%Ao&{xFjkqsJ&qul`ij#$h3Iap?L-?m_X8KEZAo{#R(4z8!OM4j&m}4B^FAi^J*U2G zx0(>owc$8!-M+B6<235DRx5DraKlB}M+Up*QG;D+g)QY4Tb~@xr)9;Is#dLG%EJNc zE~Hl&6ZajIO$cEv`y*-~TFO1`N^^((K{aO?5t7Qtj6jeq{P2pIBcR<}jv1xSD(&!o|*i#La zj|k%U6XS_lF$gLFavAwLf!-5=)>I`WvoiN6+sTGz#KIJS_#ZqTqa5o0<1q91+fG_J3SdVD99oftSKr^zSA$v3w zy-_(KCjrT0fHbx_K7iw=scuS6ZLwYfaLaoW2yy^6z$FVOdsE>w;AlD;BhUk4^7p4>wa2I`Ch{0m)-iVC}YOJLL^8uc;r-ZNQvd@Bcuf6W>F zVeJ!I?o$bXE;Dr~+aUzKMn<~X{`K8 zL^NO~nvuqq=)Jnnz7WcTGzPUtlUpn_2zGF0>CH3I#NSg5x=Le?_0Gz3a%x|6X1);j z7otM%HISg0X|{P5bmaUV@61A?*;D5$ZH*v(!&8$X3w1Py_+G@TK|8h^9*C zq7>FYO!)2lI6W}w1?L@@w|z^wh$DfmeLuM9Zu|9ZR}O-kT~U6+2Boce z6yExdpUc_(&rMZD=0K}=fHem(Dci_X01dzMjpX;KImG*-TTRFfCgmdKv8-88d z8whu*=_m)tiq-M>gL8dm9-jO3>nkkS(ycfk{8!&vyfm~NAZyu1;p!#4fOa6+*nvWr zjrNoJAXTuPdKI7%ZZw9b>4P42QPl{Lf!L503kF9Au<8Q7yy4y7TJE1_f6>F!JR0`B 
z06RB4N}se&QWlSr6vAj#IGVK5$&KI?`1I}qB7Nq%78J2bUA)FyDr>W<= zsDQ75zdZ)&9O809b2;#|2>Hhlmot#17)NZ5n3+0XusX~MFGxG6!yAvdMvy2!FNk*cDE^n0Q!cEx~L zN21HOH0PWK>)fXlR;+-;rzD|kF_kFrmzX6^YZ`g^%>>3ZkuhuR1h!E4T2K7X!6Z~hV8T8pxp47yF;f&?15(14E*<~kbdCWEIEgLglj+% zo)Pj%ThYN8b&W(<(iED%@YEI46s}ClD*+IACk^CoW`ukbzgCpe&uRTOqv0gmW$2j$ zkH>6s1ikXGfh^j07-+kv1ijAfGfiC8SsA#*`fvBUzkTms%wyW7uo16`xfh1 z_x~m0>USD`E!gA4W28OFS6dY~u8Gb3mq*CesUc zd-Bk`>2`o3ORg=vtL>2mxKYAsueL`Q?8K%@@aL;)8B>47m)bKIiF+;4%pjB)UZR!T zY2CbRx6@~93;4ISw;lWsu0V=eMb3n)4pjv$3>D@5N4BFmUYt^X%acb`ub5689~v3{3OD&A zZ=d-F^BbBVws0NegNcLUyGr3s3bm;ZfD`Xiln-ut@&nCsk$({!;SMY9w5kL?v56xH z)d?*AHAnq_<-F=8OqHnm%3K6add~csP8Mpb$x1Li2T=b>*_%tUUzjr#yxbu)hGi z(Vkjuc^a*?P&A9O+xbJ=^gX+sgY^PZ0!qD3Lmq6kSp(>`u60q(jF*SU~YIpf9b4i-%dt+z~UW1P<(pzX7GYkLXG9QT+g& zRtCYHVx#&L1zx;L^*<5P(9Sm$7}>f?aQ}`N71GY z(oLNi-s$80%>mvoQ>IJ-8{n#bh>=Bpw|IW75R_}poyBs!MOTN5+xOKP-f4eVw^568 zHSh=AKze0lbU2sT^qa_&O-gs;Y~WPf5>c#pA^~*bS( zftw0%N6|IFQOrmpiBZS=76KEoBsz|G>}C+Sit_H;CbKh*6GL#eFQY(*Wz&>$*ikn=&q9QTn(kHKiMri^mj^2&H3^ z4pABk$!L>9q+N;)57{IEpsLU#uf$o>za(o25{KHyyrz@T1Yt$65sL;EYvNGV-|BJO zvDm7{qEgk7plJQ6pqRk0ON$X$st-}n-UfG3u!+2|ct&Kbf;l^y>u0Ew2J%;9l3N^^ ztSL}V>a2AZ_q+uJ&RL!xKaN@ktz-Nxgwv!(BgTYY0iH02=@libkc+M^6cHNTki62F zZLtuiOC{--OQpnYQef82oy3lcG$5oRIR$Mg%0NRrnpjQ3VC#t`kOwFC26Pv#6KgF? 
p5+-$UJdBWW=1OvLd5xwHNKDhjK0uQU^71?{h>VfR<$3<>{{jACnXLc- diff --git a/utils/__pycache__/downloads.cpython-38.pyc b/utils/__pycache__/downloads.cpython-38.pyc deleted file mode 100644 index 0e3b5125abd03c5ddd432bc977077d21a7cf2b4f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5324 zcma)A-*4N-9p{l0Dao?z*iK@*P0F}g6Lq!Kq;1-@`PDRb+AVbw$5}FAg`nstSu#Z` zcT{2%)E?%(25TROB7?jY0}AY6*k7@y{sDX)u%R!*u&2QouzkKq$#Ihnhy?PEcgMT$ zeSdsD-{YO}@w|rLg{f=xlgBjeFZ6KmXW-#7F8O=hxK`0P)u#pOpPm43jAemar<*t8Ry4%j^|M;^5eX~ z$5ET$ukZ;ziP|K0_%xqE?GQh~kMdd6ruYJ%=P#i)&Gp+_>D6d5e>HgIdqIsmy|CrU z{CALETP?A~yjcr3P}-GNcctaI>vf&FhCkYE4%bJxWE;0=CDdYVhxN6Pwe^7!YrCwk zhi0s|Gq|j#zQg3}q5Vr1GjRoDvV$Cr8RU1d+fCiqxqer>t(jVAQ*V2$p>O5|XBz!~ zrg7u8b`N@m)aL4Uf5wXIE2TnW$WSC!Pk3JIev;|dgmjZE9@Cmp^Ss28^$oYYJV`*?l*rW#c;5(C{y26NRn z6HB195&fn4ftCss>VvD7qawuqai>nzI(Y=#|YN0>N*mPD&F>KXpXFeWZ# zjQ3CpwKm)?*1po7K&{+(tPgZmFxSkhfdSPSny*=g#TDW8-dD*+)6+zvVGASsfRD)jF?S+ZOU8Ow3gDOq; zVGUZ7U9T0Eba4nTCrm1>rMJV>QcTyZLhrH_xyR8aX+xSm&%_Cod#`PTVOPFiKB&s2 zdeAAiI<=-N%lCV7soiZpr-P& zJgN$pw}e{{WmK#Oe(3t)B6idaH@^E1ex*#39XgIo%ni536QkwBiKyxdnHbp2XvY{S zyMYh4BX>El@i7>BwCj`T8t%Ht#1HZGH(dDr<5HcrG5TduDd1AuT7fwfFlZgt&oB*} zQbEWOfM03cj1BmJ*|(x^qkC~?VDoIu(2mf}w#-=Iba3Z#L*e}^#&F>;)ClRWDXIkH8TC_HNV}R1yMewS1 zGqH^o3$fn))AuB>l>~SZ%l5QfE?ymL@!oF|+!oCL>@b!BdwWMo&!34(c z%cbG@+o=}lTV9I}jj++>P6H6=oQh7L za(LiM#}7is-D=6u34}vF>$IeE-s!ab>izA|l@}s=d2Q{++WStz^5Q7W6%|Iawfv?N znNAouk+HP21dq!ilA7=l8BOHZ+{IcLx}9#Q#ue&t5ohsLXYo3gw@x{W0m?~~i^2`X zw)2*=_>r?%noh*JhybVZqO{lAJIJFu8OZe z9E>kS14z~6na^3@40T{B;)T2qND4|O5b2X=!t1|a@@tH+iRRLAyTILczMmPsHO8~M zO#E^9lp!MD)%zB_vcSiH%4|*pm8xhSj3fSj6B~mf;)lF=Tp_#iV!wz;)MASl`9vD6 zK=Mg`=yQ!vT_q&2qp28JAL~2%&-H$`ZzE#kG-44g*He0pCTYwL+aIyMKLP+l|7amR z)SjY*0_8M6{MZ=GVBZCh_Yp{%#jNxD?_5sbiLqW9u?O$5);`=m!e{V5`dFtpjO|Se zvV6^F`#IVTpHnC>A7`=Gm*NZ}7W-3}@UrSTrh1OYIrP~4mB%J9OxXeuej<%xu?Pz$ zmRCb8hRpOZ7nrhA>vB$m2kU(EbryZ!u8ym1a(!ic%$mq zI&Ph~b@UsNx{{M2E@%pT2nxLW#BUnnInU-F 
zj_jcYA{`F~QinZuDY90Ap3j}A;HZsy+fFTXnzW+U{?}#f$*h_PDBjHq%FjPu>#ia9jO}`K9roploE6OeI^w&PC>8-IKUWx0zTe1c3y) zVQN4_kbMBu3ZG@K(P%wuOmQ6}D!7!M$coC6C;z{mo|>$Z@ny%`ifky|fzz!cKbfTk zEg2$b5rR?>c#)0l8R?EpvWhvX!DjUF$ep=pJ#?3vLD2ME<(S8lB@ z-*NBVy}k12(VJdRx@X&0e)Q>?TdS2@*B-5U(e!Ah13?i@KMjWwy2D)M=qwmjN)XK( z4sqS6kC?F`f{yb)<`N%c6QEb{GR#S=qbw<|(B7De(b7?IhT8Wja>#8sOSHOl9_V_t zx+~7o7=j8xR>$$cf6xn%w0}ZRd620H*KI;hN z#*#KqESa}wSzbTFir~m1&J?qHk(ox3>1+xuIxe~sgC5Z*5rHP2beueNHv4bO5)E4W z#F=yJt(%QX``wMOv%bCIhmY!&?d9bU-S&+;TfOyjojp73beE!5_j|xgN+fM6UcCaS z1I%g!sv*J#OvOR~F-jxEnMHvw&MF8c^9jx=+)`Y@R4#f2XBV(8xWiD-Ybe!om?7@v z_NFfG^DK^(IqiccsBCe_=A zo@Bt@m@UrWBxMcP{&uuBICu&)@=wS{`SM>!GGaH=JCtwYe1!u?p3iLRV7rrOKRntO zGH=Qb3g{o_M;_DY;SMqCXsV}JpVj#6S0Y&xk&_}Dby65*iQ)P@vG2Bg48&<>_0IJuSLW`+vfsmb z@KTzV)}5tty1uh0DMej);T0F0i_U18>-V3|^FlM=K-Or94g^=?XBH?e#yL&p%1RJ= zF1@fgdu%}^|Ix%qi;hZ6Nhk2*XuRj6=t8xmB4XVxzK%rJ)xdY1(?IYujYyZJQZX^Z z7U-DB8=oTtPJ}6pyh9CH;7ZsCL`Pi0qqs;npE`0lD2lUc`?G+C0@|lomd@c@cylj1viEb$NNKYQ?G;qgZ$ZC$o3a&f<7vf>`lf~Bnm%9% zQGE`~#c-6N?Xg?rIN)p5d)tT6) zVkWVxP;R|i6(3NaGUo%TDaa3~MtDbb8gAhGRO%Hr97*v6897az-lZCS*HPa()X_gB zkOsAp-j%oq46HsQs81)^^PPbAJoh5GoH`Ae=E!_h!hmFMBR`h6@IRVAlRx!enr%;6 diff --git a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc deleted file mode 100644 index 3e5b2903d9aa6fad8911329c5e02e59a95b31f35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37812 zcmc(|d0<>wc_&);)~Zrzu`J87ExXH0yDhgZ+3ww#Ztrc=wrp(oR=rf0>y)IDYV)~Q zwpElm&@!}}(9l4$1TZ6!1n^`gVUl@)kOW9)CJBU)<-L&1O)?>50cDwFCYd+yc^2O9 z_nljtYy*>jp6ojJ-gD2seEa$C<6T|x0RCoD&t`xArvrgM=R@mX3=jM9bDjtX0xFOS zC`;J|D`m;mPT6t|rh;+}r9yHIr^0fLq#|;SrlN9{tw>cII|Yji|($Kx#C%D;HVVt-4hAqCIb= z_T=tSJ-K^T0sr!+e>3VacQxB*W>jIMc!Q4X<+ox9Yd0{Vd^iSKV zH~I0amc{Q=1F1LX?wop9tyY6-javI^5ba9J^Ex#&^+?`Q>tD6e=MA|{Q~Re5qz~pc zEF4nSqJ?x1W+PZTy)_GYD-SruUYzksEwsa%ll3H0_v-3i@NiQl{%`v zrtVVPaQCPhR@<)xQpeQYY6q^zb8k^QKVhj6HF_nKdQAPM+O75=-3evq_Kn->9(6CC 
zPUcR|2T*%xVNBh(Xr)g1HQ$e#ZT->Qsj1WWy>;9^9C&UpbtZQYRE&J$NNB zkN&-8UwBb{UA;-|Lurra9-j}So_I8%-mD(J5>OB4tf?pGgQ=$w^N8AynEigt(}+2s z4kG5DACppt)M2#y8TDJab-8EP1k@3<{-}ENN;vhL`i45Dj-!U>)o-iE)Ct_B)k!sm z^e?F2QK!{g5tGT?J#MKp>MZWh%l$d^IPSCR3H2mWD)p3l8rNKI6zNjx8N3_M4bKO1 zsodzirG8gEuhLhndHhRFsNYi=bsi}u)&Hq-Y8-cYNjs`0)&i<&Hs(@6}s;Ek+rJ#OaT~Hb^MfFYPsu|ps)E}se>JskCxi_m_n1#z~7EcxRhpMXH zhPw-@rsj}atN);WM7H~QGHB( z9M>OFpHSCueY^S-^)J){QoKX`N42Ovg}ay3pQ;~MKY_b>^)2<2>eINpqCTTOi}deQ zf2RJW`YFV`OMP4YEA_8&_pCCdPV)L`Z-+Rt^PuNUVQ;?epLOX z`l9+p+`T7PRlhWEVQzj|eF@L6sxParAnkkAU#VYFZy@Gfa&m zgX-U_UqhLS*AI+6=I#Wo_xcPz;Um^$|8@9Zd%H=BAa%EQTGj7h!7jw9e=v*b2 zad8(vH>q!kYp*9RUyck1}q*>eX@oH*x&Ts@OZb^EWLICAFTsk29JbR5w6Oku=z z$MgG9)e^7@sw2*|eE}M7IW-nUN<#Dvvi;ow|88|o6}3gXsMNBkDom9)af(nlLwwkpF4Ku$brLWy@<-_ zOZk##?N0TaI(a&cG>vzGEW2L+<2~DU2tQ{F0yj_#%v*B-*Qx~;?8U%>U_8QQ#>`vO zoXOyr7tT1@eBO)XCrV|V8xDCv$JJi2kSk$`OWCr@p%pn^uUnqwTo352DE-DGqm$)g zZgirOaokK{G^4QR(~eu#nTgz}lP^{Zxt+)I6O%c8?95os9hG%5I$>7LNM+WG<(;%t zy^k#pAqdzpOK-#Vza|j;#TUQ$m!JR1zxuH+es}(tZ|Z3T38bg}xN*_P1>kiz$bPYh z=$Sn>(}8!-{KXgEzH{lzUw#f93!TTB^K6X{Ik}9^PU<95XbzV2$pm-d(Tk&T*-1>U zlRJoK$3_sa^-hFY-nRbnowSA1h*kuqtm%lfAk}f`)QJNJ(~q8h9BbDuJ6<$bx|rAH zQcHatO)ZsjM6%^V=KQ0Gs#m!S_cv7;M5I)i#0@KC_ZLCvOxxuR{&Nt|A(`axSSon> z$-~v2(do>@L?JhNsjRUkCsI9+9eDK76GzgIpE;2}h<#EWm~`EWvv+j#(xpoyChj{@E$4q+i0&ZLkAIJu4 zF`#=1XsY+RwhGQ$@7siB98@6{zJf)47?V72Jqd7iU*N5QGl3ruJQ+AEcTeH2G!P27 zq3O6zSwPcP;OT(EWIPp6QA|m!dha2^nPh%E=}zX7*r?f@<0SP=$w}r*PG04bnWQRb zr*k@)EtlL3mVh3qc8-piNKD_k>WWe38+CGp@zLyrF3(ij)+g6P5I={%0R*+cbKP^6 z3e>PJoV6wPAKx;}(V=|mvAtSp0T3Dqliss7b*vE}?xuPF_l7(`!gwX%$5Qf~8 zF|w~v&SnbE!y}EPN5_W4ni~cC_;TKH9WPX_U{?n-rCBcu*rs(^V`@T#RX5exDEY0d zu^U0aPFNw!w&K=ytJ~7twnwwFG58XH>@@eB%;)pCsaZ~+8(09i!J4!eF{5USDr;R} z&aQ58Lp580oP1xz!pxc3d@4}07s87erY8d`cozM1Bbd#n8nYRa*~E+whsUZ>w^$i* zFT0*~>DaE_Buc zi`HDY7G6lyLd=c0uEoGyq!wA|#(j@MEaUO)r$E}?T7+@RjGpd7%QUC* z=bhV%7(syYVP|-S<{s7rL|(K|o|pjW@Ity=cD>kSrlhcV9Is0Q9be2HGtXWyUvmA) z0Jy-e@j}PPj-K+uN6wr%b;j#JqR8+;%FU3dL7N% 
zIB<=hXgKPHN||ELi(bs>^JORJS(9j5rZnw^3sPth4fdjbGnY?sC#p#MJ(@!IWVdpe zCorqM)*xn>bKHwiKaKaP$ZWY#zIdM(eG(|We939s_Z&WM0a^aUV8lzrNW@3itub!! z@sjSu8)DMS%UcuMSI894tIWf5Nb!&O1#}vL*ZFwC1*J9X=Ch8sg5=2)_oWY=8asOY zQIa)n3y+JgvG5cgWcy(u%-Jt5euXm1>z`; z=O?P`PUO$)jGi4uib6h{Az7C^J5#BYwF|_OVi&_F@}-%}UgWS)J9<6hs-aQr?Q?)n zF@av`^7L?rK|{y!+5ol?X%M}eMZ_wFj7yRV$bHsvb4A&eUbr}oZ4N+qfQg8H(-$)! zZ)8bUKzn+@?D)j;spy$;avHdp9Y`bp+mYGnMG&y#fPrx=kwN@RSh|F`EMK(#aTV;x z&mq>#1t5C}KP#Ho{hls`W1teZR z2C;NHU&_1bG~t(_&j9VJy?*OP>aTCw1C7=ZvVissNUPAwdaGYQ3JX>&6Z@`o_D1%c&k|_|1Q}zQG z$8@k|u=FLQ*Ow7^wsK`qhC|XgGF;O2z&IA}YUBN4WQ_MO6YC7%3)9GSa%@lZ_NT)B%WD4euzKEo{%HT&B z$fz}|lsnS1x{Tm8{2a1mF*_cL;h)525z+R?Dcg^qb1#C%ju9C330nw18~euWmyo|- z!YZyhW#1&QZ@RqbIdlaKq=1iefVQ3N8zRz0XTn?U0OV*OwRAl;NBYNUY%VTauAav7 z-GB!$Q2^iAR94OA)&l(kkchibbKQVPMoKU0fJrJT$LpzA;GmvM+R^Tt$zvNr@nY>FX0y=)SteJ zbO6HKfPkA|0cVO{cwA!zENhV0+ZZ<=nf?^=7$V=Q4mMg=@4KJqriIQCKE#|m5D+;5 z-B{B>y$v*rP(x^QXb~bmJcn`Lq*v@Q!4)9#CXiK6A}~$P6uju!B zE(F%O;YhOs`n_m|<{r_nFnBkD>psW<+RLT|>_mA7&_g;NQRkOUjqJjoLr&Re0U=Dg z0d!h=Brs_VL-!8W@)Co22C|bmW-Z&fxe(<}R^t^U`3d~|o!lFa1!G~&*erf+e_Vq5 z@e>(D>rTc_1u3gSxPp;W5nRKDDf4=tGz{tCJY*=qR&7FaI7sGmkNtr$!yyYOaKl(mz;xO#jDe9B`pAodPl1R|dvPIEoJ)Ck^17{wq&#aJRD29V12cpQhE5RY(oq|1S_~3!j9^?V$Pi`E2o8DZ=?nmK7h3PUoeotk~=9h>gW4)mdGwb z8uclJXhSHYC!8!tss2YMoVCix{UL4)pq;Wo(?bqegJ|g4@LXsvJQu+)d=*d%@M;Y2 zLh>&3N`Ucm(dLNPqGZZB;;%sL;6`epd21mGsbb9SShN=63T4iPAb!~ZhP@LE`&GlR zo0L)*{X4)q%N~+XWW3rxt^T0W7P*`d0(> z`WLO$m^Pn@jkV7`I~>AK=AQUF&$%CX)}A8+gxLotjrp$GACwtyF8k|2e^RGC>!SW~ zBvP2R9~B-_?$y{)9?oFoFY zbgd{A!obf_Zd<>#b&|}zap{a7d z1ZIyIC)MFhmM^6#qN?A{l(L%AC6U^ARvH2O+qoJaOP``pHvg9y@#bz@a0)Tp-nb`ss7WPK_NqaQ4{QBS#Jo z_d);*(M-9Bq0i-%XO}8I2T{zFK-s4mD~yHL(-M;gAoRM7Y6cP((Qb66)BY_OgCQ`= zX$1+72Gb9ym(L>0L>XeU$)e-MxQcS5iNjz%OC_(XEvez=DOA=;1i;T<;ypp!vb7C) zJXujT()lC2_C;}Vp>-gxu_>bAKm8l^%xwUXQ&|5wgLrs3@G=#8f=mpD_(LF*!^kKA zW#6+lASA%aRY-Jo$VG9$Odw>h#ID*#JWVh&WnD#@HPi>#Iz-fCz|OGU1_%hv*|>Y~ zlf`M|#R9IP{`RjE@G>qw29iwx?)lTmXU2=mUBJNKvM2&VkrM9&fWqWqpM&hly0?T= 
zO|dZ;ry!?2>`Z1VB$|lr^k)!UhX6~RhY;m*wlJe|`fGe4b$%9iQfC4;5&Vk<1_HeV zkDGspx`FpThkU@iHq>3;V5z^&K=$!(@s5>vJ3BEao|Ao~Nb8`&4IeuFJg0aD*wA&&Xu#~1Ea&Q^wS)h8X)a!K2 z;M)2ZQLg@d1~RY`$iVh4ujB9840fO5F8v3%de+EI!^;FRygP6M0Y4BJ(3W)#B7GHn z6(|=lZyO|_7KPkXAr*QR3IG*0uq-jUefwwuy2I@w755ydm=2KsGk`Fl(=9JJotqWR zIUM#v6NU15FIdc6Ht?dr=ETtCEO||6QQ}6KN0L6(!6MV7sFruKo1FxLg1A2obx~YF*L`wfQ;l@x<2q^*pY+eXKA0A$WP>KDVf{Muy`BBZK;{{9QOJ3Kw zE*IHIkch>KFR{cu!M7RwIRY;<3!N$C zsnCi+Xo_anTYW3xVTZID`t}?!Wxd%LXztnDH+u|k6`EOBT>~;x7q|&mD6k>5gWl*Y zmKg_MA9cebP~~%k+71JFO&TsUA8qCXfJ8Ud$ysWnyf`=%Rcg?QtaZv>26_RWq7a^n zvW%kwfe$be1#j2zv~1c03U{;X97RIm7|3QB6lQ6m6zkMjHG~bZ-50}|vHu&ED--ay ztS>^998^|8XQrtC7%~0?cy@8QNC3tKTs5pJ%0_e}H8oc;{OiVV$kzXWm@K2~e@KT$ z`b(a)B(P(k5SR;8`#29AYszJyl>@H?BHsp)zpJ5@!+1l-Y6umPa}3+nDCiab%l{(E z)+?PnPs0EB>&eH!GNtX?1!$5>8yc|sEpiV<7nha1C zCu+EV#BxL+AqOwKw+O?KLoFdZ2nc73k>)?@=kjVIxO7{ zqq`>>-GwsQ?`~}DMrdRr?^dG-k~1HMzTxc(XOQvuM+nwMJ z*fm!uh_=lDx`>(cze6j%?gI|=4;&FArHdj`Z|0khBVzo5H$eOXGnRpi1X=-nVd{sq zV8DqZU%<9O?rx&4@&=x6^bt4tDy30hrxhszbmnv$`P+%a%rCp8g3=TS8UPG?4nSV` z6JaT-qlX#^_6GCdsxhn&0!Tv0vl{B5)c`;k;t=$oV-g!_i49Ldr0YVJq&id_!yd!1 zcLH^Ug`i5Pt}C%xnEQGz;&#-afYKkXg`v@mD=?j^dl1CcT(mT#dgfS4ZwvjwYh37r-ZbVW5Iazdp@h{^&S1^L{hDi0lM-#{sCB5d z(;*lbo~cFEIx#gIP(!6|wcf88Ij)7~;^_N^S_g99p_0hcJss-j^4eI7BizLHsm<3q z9tg~ZO1((CMPlx}7Gw;WU!&2L-L!SAy0UJRfvKIGIZuO((Q4n0B$QprnF?X7lLTd) ztj0`&y-EE~n6v7g&UC&~fi5m71blK^sq7{*NiI_uW0jpeC2hmi!R5)yxDy1G!yTPDe_rPR)A@>MKJ?Es%jXdccWT&eptn%?LNDb7XLJFSn=>Je##&*4FoJ^F8>Y~qQ4kka zDj~KjX+M`$N`*KuW(|Cgg&Q(;`Lu84sv!Iegp8to$QrQMf~SvJ{Xlku0D1k^7I1|d ztR3JGbq~_t2;VdEz{sw)^~xC{nm&Y|aEHdAjDlryh#+P|mceZySe%@mg#fRVgn~&E zrFfyqj8@R0ynzRi?0el6Nm6H%LC;3X9T$PNmqTmQC6oRjyTiQ$(O$nab=L^~-H7?@ zb~4LG*8hmAIW^Mg5^jWKtB^Jgl6=CpvrCXf;0l=^rEVX`J-`qQaBxT9 z6uuUni`GK=sanK&N-P(mv|M<}s)h9w;-;XG3D%;NcGPIy7HOBf)%U!EmdF@_iR~pN z2d)d|2g_+G@R*h9iBy+ts`@}_ic6Obkq}bHIWka$&{=C0c~6|kFswR*9YtYEZ4O# zFX+Vr2H(ZE)n32JMF{5{S~_Jp4)s66xGwwzKmQ&|q6HmS>^#jnU~NP(6|Ap<5IL0$ 
z-7Gdk5al}iDVmifK$akU4Y-?=O`-1lHq1o?15@D|b7Kp@y^EIMVUR<0JFQZ$lpROe zFV+GRFf~=NIbh4V7;W^PJ2ux*?MB|t8gMn~#tZKT)fk&f;MX-y>hX6BIR#uA7i`%n z7%*CksRS@#>}lk93cp$u821}B8uov~?Ut|ymP~9rjwvv;k6%NAg?p}v&anD!cLfZX zEeFDC)hjdza=Cq=4@f0I&`n&ea8vg{2_r{bE~!ZutZJ)m zK(P60c|VA8x7C_?V)oV75}-!dLfI@3i9d*bubPgd)UR8CxlSuk9mM+q$+@=HiFQDE zZY>>I~n z;gM6f=Gg@;a}f$!-XL5d^eNL(ky!y2fIB21W49n`AJ{BDRLkW2=F3tu1fawM~povmZg1H>^!nds9+Scy(nRDYoP{NN{sYw@Hpb zv_fjHgdTx28olXPe4KgO&r}1$^t;Au}A7StS0|7=4@(!nk0;xU&KU+q|(Mi0e&Qq32s^*b+M_dTk$4$u-MJGvc2d?r-nTddDj2M95lAnO zLjWXVA-hX*(ZRa9+P%ZIh3=$GqG?kH31QH7~SX`-d5MT z@r6zm4Z%5pS&anDG;NR`;OVUjE=@6eD>0fr?H z8?$v-Hu8jlAc*RH-!H?X#K^ zFrEiHO2ABeCzyC9+gbqNc>4H>qxyDd|*#t!8z;1j}9lqw%owFU8oHvniuy# z{3ZR?UuH}gVmBVAxRlAekL6~)6%cv2x;%TJQ1DH24UlTCAWP%S<8tb$u`zr>X? zfZ{1G>Bhf40Wh?^0ywesUcA2%H{ucpV(aTxHHb?aC?P~A%$7w&vb+`m`E2PafD|0O z0Z4&d0ko3HI0;SFBIgGis5p+uuG@~d#pN;r2Tuv<-))$Q2ENG0)?vVv?Llu!5s$Coxuhb1`tr# zXlcWR@+BbmjZ8#j1IyVr@%~|C&~XM$2rm0h0@-)|5iV2E`;pY&btGmZqGQGJp8bfw zZdb!2vzcO{8XB1`7TP3Enb zEE$~DA>6ms$QdKtl^W0EZM{ZpUucX7=9^-42h5bSm8Pturg`Q@e+^V3EZ1DfvKFwF zVGI$YVpj#%An#O1Yi_80x#5NB0y^Nf2k5W@r85#A5jl{60~Nbw07+ts@*q&60$^~R zw-$OJrT&87rd~*@SKx#|$oVzUwjC_tnt-Q98{=r>U#m`crkKSQN6DUc)IXk+(E8PP}TPeB`ttUfT6^z zKFY+LC-82iAijP>WlMc9{#t`NWtFaiF?=;bxz^s%YSfo+-LM`J>aQ9Rngo)Jq|h7S z$1-m6M&fY!QVHkj6fwt*5Ru1?ASt|oqgV-mJLYZDkiL5%+Q}B6u_zXBoGBIc@2k%o zV&usa+!2IQ4Nh-I(kUminGz$R)K1pZL2|+~gCn30XG!Cc_eHmNwR_&FhvI?QrA*myXtqz>5BTFu72l$fmsJp7G??S7D z;GRw9E@!e37{OwFZ*q8z>?0xdjQmDa1)o4!IOL)Wh5UIhETo|qohejRF-XSr4R!CGBTOrxm zWNGGX+sLG28XNfqJa8k+_QhhK;)w=qAL_!KXK}LX~TKFWZcKIEIRiY`w; zDBujo4d{(G&vKXvl3^Fj^?CF{>@@TkOE|?Ki8vl+9ET!KKfxFQfgI*#gWG_VNcqcn z-O!lCF~&T>pr1x;+Za-q(4d5m0T4qSXVcmaV8IqtHAy2n03oq>)st=r?J|LK{yNeKi6w8g;|2-16r`RKU-;(G`cfJ%SOh3%M$qx_0@u&3#*$Kcw9XDBZ!--S)t z3ivs&D&Hl2_X=qIg}z0K3~YDhRceU(7Xz;V{Gko+nL`T}R?s>QY2-c>qj93#+80NfW;?pn4_eWId%?p#sz{)pG#V0F5*CzCdX$j8iO%CSXq@<&v{7 z?_RSm7|~3>T6HC+21L?lEcv1kyhL6L>aFmmu<{C5f5c5l`@U9-fV_y1yuf~lxb(KL z3P&j;d$2Ktv+77`z%JYSwGE)9jc=ZTUZCeq@xx?zlP8+ 
z_&vbAtO(v?bYZ(UH8dBOQrC-8WrK)I?(p~2`l$^Ichq9FxIt@f80nL+^AD*c{XMu) zl^|vlRJ|f*Mf72TBq6u(N9pv)o?9=HMZT|rtU_|2p>YFxY{AH zJCQafHIA@GL$|xb?)Lf$LkWA;C~O;dp*OqPo2x*3!+XX=&j)2;6>@Vx5GX?h`q$4U zg=b25!?V@}n2PYY4ut1XR}RZN)yQ_Cn1Eu!(A?ofc#sC+VV=lvs-46-@;-UjOh2~p z-n){J(G*m&n0Fvcn4pY7XHX{$zp|n?1a+UrvwndA$$gNrBcu9WJZAB0{E=!Rg(O_* z5N?Dk?Kc~ix^}8j=Y0!Wfj^+hl1OKS79!F8fBD#;6ZIk)2<-2{uSM334GK!}W?8c(2gF2P1xur2sJW#v_oIxzsW+xoynJ(G>Y94UgxiM> zjs<+;D=VrR3Lds*H5A!ykE=x)B*| z8kS4nEh9|2ow)r!)obB+%up8;KWwO=rl8T6f9+_jFj6K?^3L`nkz75TDH(#+)njZe z9LT6S6z#(x=r4&}w;hJRX9FWh!C{C6W>(?$7A?H?9w=u(V=l+RqQ0cHtD|$QhU>E$ zKA&+Wp+i06lX%ro62v8}Ucf)xu(nlgXBaCR6$i#oZxw!o0vhg5O%UKWo}4k>fdD0BvM2qc!_am7-$AfnkCQ24g_8t z4aDg=iU;+6#*Q$U{t9{LQzf+iS) zrlt>~q~72Fh}8thRvshjMJxzbdqZ$7Sl%Sw^N=X5Zg`fMq->v%UJ=Ch!ECh~>31Mz zgQYK_rnVhTGOR(0{V^V>*(n5yAd8A1m@H#-$^$hqkZ3^#6e^MfF_b4EMlxh7sv+4-Vp;eW=n&O>K%nQM^C&n&qF z^3&2Kz6@B3V-=+nT2uK5+6h*g|Mkvy~ZW+$+vevtb}KgK*8M zM$f}aVyZGx-FiEI#Uvc{(+vlg-j0Uom)TMh1yx_}Rf524W(ArqO>)4Hg3nQeg6FEc z>t(|lp>C+s!WQ|4Mx!7}W^&Hx0}t(baAse#B*V9pQrY2)HA%Q|F@PP;XOk!Mmr>E_ zx|f}i!{G~1g`!-VU^hCP%y>@lMqSJ%U6gg(SiHJ~ zq?_3r;`sm|n#8niQ_>{_{?bgAZ<-0eA6o#qU`S1kHP{X@R3%FSuOQfnWV4&vW}o5K zZ$|#ML15_-{Dj6JvJg}4Wc?%!>Ovhxl|u#|THV&uKj^VYMOlJQGWbab%R2d4#O+`w znT)pCutHidXq9NcxheNG&ixz(r{Cc~{+)&?vjdWG`M9bX0FBR~IJ7_*c)>%r7-1iO zv!dbVIK%b82Qcz&BY|h3wq%@l;i2h(g8qRZm4rzM70?@pbC?baKu!lmR(Pkg8 zx?g8rVLB+essC}Y@R?KVd>8?bYrGtMIrMUPJj{+*Uj;u6i7;1OR^ks`V75k{ISX0jw|tVK8l_h;fcs>LG0_bT=&G%B@em) zxfiiiP*h2%26z;r4uo~lQ~0V)D@q=9eD2e85Vh#{Tm_~_2VYPTu$a-gu*(^Ql_YX? 
zNC|ZT39Jw34F=W`K!&9au)64&ibIwjloD*!#r*}TFIpzfLH;ee^-mX0)IwM7>45$g z+SCbq2eINr**#Zj+t4dDgszEY!-W%)W`!RoHVhX|T%~;^Xaxy_Qwhj~X;1OG^l_!% zJ4p-lo1lA0yxj*ngBp)+P#CMo@R!~qZwKVSn-tYoHYmbI>>z|cWzGZtOidcH&r8>k~_<|_TmcD zKNqs#hiL4(Q+9~Z>oc1$?Y_0q;Z|WZxPTon2|ykqG-3V4ZewOOGeG%&RE#ymOs}2~ zKmq=%0+1HKwg6)8iE1_eIOPLk&nfSOZP6b>DtJN4=;Y<`By1IS?gTLmnhBqw2I-X;eXAz)64v2o95zjCXh$|F$qE6tafsh{{xY&z=X}%7ZOF%G# zK#xR~A#31!Av+GUsTl4f2)Ee%kQalnmfu=j2N7T0P%p3vsOvGT;+)R00c7fK0;k;G zvYknZle&B_z@(h?Fb#eV(SUd#Z|i}$?Rg$q^$U1iU0biLS$*Bpk1qn&d8E^%WN{X< zn5lR{%7}``9!H2R6{NErH-yVX>VThvF!+{} zGKACAlv9-lU$J2N`|IOaHvrJc>P61wNxyH(s11?xsoQKudFbaYJu;L@6Mgfu>KR9LBWgmc+lWPAn;{p z;ZbIjK_YEL_k?wSZ2ef!oD7ITp&Ypi`;x%`;&8lL3X%=6#MYsYpF&AsE}(L^D|yXT ze;txYoENAdW_CAzdrUWF8Kv>wMNYH+*I|#Pv$N>+%^N^6q5Pj=??1=jry0nk{%hU| zMCfRgK=QmkV*dv3v#6=@$D!fEjpFBAL|||%{ELJ%Fjpx-Iur8@T4y}?n)N))V{wi_ z)D-3f0$eoV%z^5r#uEa12;ddO>}r7Si9fLt>WDQ9me@r?5RQ`xz{>L=uc>ogjj>ci zYjDNbtsj%IgT)>WJc~F;utYVy?wNab73H$qT=tmDJ?3(+x!h+i_nXTDa(T$)d&uN_ z$mDy-hj;15>uUxY;>&Z zKll|iUb3D>F;X49b14}&uLH86oaS)^!OLx1nM;#Xeh;U60D%#C0!mwJmWw>iiux)N zfi91jljbc8_I|v0FP9b}nMi@x)lZ|htl7E%SpNb>3fp`m5({LeR6bvV<68X_j3N{; zh5sTF$`Tf4^b%{>ftLY$3oJ~2h<|?<>6e^Kr;(`*?3#x9aE!2%j2~P;(~(0z_c$F! ziODwNN%!MD{=E}?z&O-SboB=MkREqnbpWahEF**@G+qNRnYQ-{C2E|#fpP+IydFXn zlnX^DEF2&=!a9dr8}yGO+5!7^IE6r&Hm56$g#r zPD`LX&WPtr=AguXLKe8p#hJ-81d{L&OS?6=3=Vk_7$`xXuFs=9SY6|crvqhj^iqzC z(lJBbzyscDlbTv=Nt*W4y`RJ1hf0VWV)zYM-4P?X(_cl&ZJ?IpOFY2^CT`p~rn2{i z{UA%gy${tS{1z!F2N$6_?x&MojE{`XO>32CNoWZ!0nr6)p&Dh1I_1`>fiQ28dP_`+ z5sU+>%RuWdqm^JKIimVkIks;wIL1e_v}J@^*7YJ@eWt#yg=@XLIXEKmsSi%eGH)nd z%v_rK;8aQL4E%;db4}Tt=4_!Yzs44DUQ%6cgPls4N10lGioV9Q0?=EgsI3)0QEzJ? z2<`N5A$=QwN0kBYghTi_+-k&oY#!bc6Ee)*XeN`e$L@-ou1a! 
zDRp|{G?v0!4LZ>OlHDh-yTrvMr;V1lCYFe=G|wz732{vm%cV{R)W$D$Gmy6=SC%My zTjDBS;#yiFzFVUHV~G%Oi7ds^AcHjw)-f1jKsk@z!OA!CaT9~h47M=1lL2?o5~pX0 zl)}<50}^yg)Br7U(JqlcSQ=%pi@|ONdl(P~FWt-FJ_dxROBAmyQS7nw5CanNOO$jh zQRufsInL6<3?5;ypTPkJoZO|O47jkDjx%@*gU1-0U~rPb7=u#?^q1JJxAO4}gL4cX zXYd4rCmB4&;AsXa1T7GQoo;RzZy;OFmLJ?Dumg7w?KU9fr?@UZg`@!p?*7^TtbbJ%uzNl1LrU3eL`AaUu15^- zq!~e^By1IE3c*bwx~sqLGZ)ru%i<7N)p2?D^6VZ;`ajQAz(ND|25T4o?Z7|1{`-#i zw~_jvKi(YemVRQ4TE^rS`*_O;H^xv#pXqTN_ts;6oAO)IH^yDB#vtfH2J09s z+f^9COOqJF%d@|Nw{1hnVbmL#hpL7wQEvS&f|~}bX5;Pm9Y;v@8hyJ>Ps!f!KyoeI z#J3nvvVI2t-loTrFVvFHjO!1qZ}sx}%$ojz^+~?g`ph`vh<$UMv82fii#S{p;(^`I zs7n$~>Y-aqLR^$pG7G=iItve4=AYh#*xOG)2or$Agd)SlkcGG`8B$PcEM@8D15E!2 zEn^o004c)ogGPd^xjEo1AVQX49i#xPv`NpGg2 zp8nS0rM3L_DS)F?SeNE2>GS2w&>4wfrU6{^8jijYX=`x@$;eM7^{LCSZM~J(7~ZbVNb#kOXDQ# zek@3s#9O#RyUQz3p0c>8;j!U8X+97%RRygh?9j~iqLc$ zG@IWeFnQ9aZ(#9b&aHED2HMQ+Zz2w=)_h6-30kNhV3G*Ez)tY!1vqE( zi(;gPl>vWTq-fxgOLzjXx!Bt8lvOKkzTl6DDZrk=6$Ncel}&)K7<`3V(y)nJw4haj z&a@#_17CsTuMv7aH|!o%Ezlz^+OP)(-q?r1qZ$2h%%?iIR-jA`W$?L$w%KAwNZSkh z$890(AC6vKPVj8!`p!2BtU_Jl=~~{bN{f*Mmm7PWa;4 z_&kK{I8rTQWJl(;X=HaNI5!v#&@GHVW4Gk_gmNO!0U>h*b(qQi6)yG|^R(ylyGoC z#3^_$Fb5?8w$$k)=o=@nw!&~f0tAFK4`Qf6Nz~s%eckdNd6&J%SwOB$$sbxTd1(j- z^)yf^=~$euHypn}p9bTo1rFi35C?NJ0sx#i&c*Fc8=ia*q9kIMiGWiry4z2KGJqQ` zu^Pqg@-5lvz~5=kpt2B`Og~2)2jw>_4XY5{6zz8Xoxnl=9ad}GJ0Y)b zlTXN^_I#xmnl+kTKw)CD*5z-pD;ljZyP}a^TC)7@KBUI_uk&&3>;HOq8L0(PF20#j zN$V{br0cfk1~8Ub?K2>oWebO$kd5*`4)>6?!3Ln=M!Nlgf2@7Lk2V@CkoqloQ44rY!)r&?IrRR zQ5=uu;pvz;@PSmepXzl<^=UtqoP2AfiqCbc&bc0Hb84OaU~%VqRrlNq_>M{-SLh^w zIebR-!8=6^ZCnZCzy#9tyReXeNU^sTTMfR3y;3V)wI?i=F}KP^4@d~S7JTk&AUOu+ zR@YY7x@rS!=!I%Ej$T;$4L4DXFotL3^c&`WZjj~T%^)nB5-eMRj7IsZP5E2V^@mN<2<0!}n z>TfW@{eI6d+c>en$Z*9ZEDz%!b=xOK!F)l$^s&wG+MjzTx z?|(@3npR^52f$kRGd686tVaD@)s69-yQ6jo4$^Gnn3hh|?x^*^Q&`-B_8)Mtd8xQcFTyQSXJ+3ub8J z+$ML;)Y^r0wT-n+_~uk_Zm2e7Rzoc+t^He!KD?!|U3k~`T1daBBDFhY#|7{dS8Jwu zeX#^_K&j#mp(M`QM%V<~*u|S`n`@g?-x>%$=C)u~*Vnc*TG43RqWD{>ZJF9opC^!F 
zocG3BudQGmwV~SP1*Cy>8uqNT2&qups@ee1N*rlE#e1xaJLqG^#G5@`TM6>MSA4aF z+~mSWwYJu))_pvrhK%hoKEoQiVAlUCpqus8pBXEccAf;`4lhmebt0b7gNKadwy~3E zhm*L0lj9aZ5^3bD?BgX!@DHz)%@#QOB0Ix&?AG$aeZ-!1hp2Xi7`S6{yR~{IV zq=H3!do@z-U`bAR&+MN;HOrk0U>=gUgDj|Ysj@5HfJN_gq0!r3tNYb zD{@IMS}K?5Z9upi{qLE948Iq;n9p7EB4)mfZ?q`Az{BPfU!LJ7m*|TFp92k+VI&bO z!5?-MJa0N*o`J{CqA1Wj8wr1&HH&reovAo{rO<|y`Qn`Nv`m@Tfn4TR|2kTbie>P% zPP*Gpb&8`6c(#RxRJ3s^sPHe-CpXS|70noMnRxbjUx8|0&Zgk(@pAgoq)CgWrQ#e~ zIL^h;rr?TKLGPZS6axoCU>A0Q9kz@3N+{gYLC+0lGcP>8XBWumF}`!L8O-9L=C@nEb%D&|0Z4msUTYhUY2bZu2egX)!HBih+GlUF zwg%(YNR++_SmIWDD7XnrOy9yeGjdMylo>R25R=D}b>245cF%!2ZJTfL*gHuJ0uPmYxf7JYS)WFVB0xN|dKM zVEqY7suSKQKC5t~llRwxup;fkT{jrn9>B+5W7{c2_=+)njF~&yOTZ`%mf+YEvY>Ot zjM%|L{L@i})1@N4;KGIjqk@CuF3jkiRO}KU$oX_$=>kS674`FO1c~tiM z+pmlqs1EXTzfz1iihq+dRK+3cIVtvk(f(i57jFm0)gtz&_&qi_u^j$~vfejZU^@IHx4Q@SF@ zi#)5M|DMsy)@%=b+f>qxk@y_fE0wUYpu@_di2={G^MDXfx5MoD5{9H2%U$GWY4*1J z^`o?x%3oPVZ$UarA~1y!Tp^IgQ5ulj_%;BR#)7qoI}ol|WYmJggdR(SCwf74n-mcF zNs5^9T8%TiA^icko(N&eBRI(#hpQ2stc6QeEGvBx*9aU+L}^+Kqp1(j^&6HM-IjLH zu-2H}`l&yu;-!8pr;dKqI}m_VRva5rU1(1yNM(> zG~#WV_#+pbxFXkooWp5uxV-@CpX;i1P4&q-Ew+VJf5F063-5bMkM!2kNBoYA>^)%}x z4Qj1*n{*`R8YwrJzPSBUtI#sk2~(UQbHaOJHHg*(tqs}_H1G^D7!nvWtBu!eYM1=h zY--LVzjeZKYP{Pl?_$gA1wm-TA-0*Vh|Ct9+->1J8+UlJq3;~4#?7J|$vPLSJ(IJQ zwExV*3^$1^v5o@{e**}<&w)}LX^L{+eHF=;Z4P}u;{F{s1VwpbTD8%9kE1vP->m>A zEmFw3fmJm;!H*E*v%AgK3c&++tqy%^B(s&IKmlW|9lmqVE@y`>W4Gu#*pRm)s74Ws z4{qfCS=@4b-C0mhtM!&B=p+r%8^9NbyQ= zX@bZTFrI3QG}NBGIjOIsRkxqRwRnp;?1P%XVsBO8<01cGFeHxf$rE}Qqd2Ce_qUr# zuXQHr!14Q3*t%%iE2HxlREEj%KWB5h>i(#;x~d(IAJy;U)ZW!H0uGM4LFig8^Uv0OA&e zSt{ydcw>`3f$7k6_Jl9Sm$UF!P{wK8tna!_P-(Az8wOJh$H+Co-^^?US|gxf@n#=6 zKSOWua*!_NLnl_A>NI;lO~?dd8{f7zIt8(O7bTOR=woaE^$e-b=2p4MCa}AGuYQG- zcOC;107j-qI3CN51xZ0}um(T}CamqC80jY^iT8b0_xE6}iQn4q*|z<^%r7a|2&hWr z_un&394RA`E)t4Z`rYV#wpEW&_px#a)jNNMmIIdueR(bp$xe{155m||zB2)Fll9zJ zAjp7^4=TIy#gPrb**NU0H`Kz!+B^c(P=iAtX9O0&fIR$zT@JAlFr|rw{tCpWkd!gr zEl&Z0^QJs1+z(m-IB3}$ik9e-eIX9~R#(IO_2XS9l#n#IM$I5|m%T>}&e}p4l9&%d 
z<57Pb6(JAGn~Or7DX!n2`v7PbNsA*w9VD6gT_oJac=t{oQ0`!V{8-e*Se66YrW>L! zx{8P24^SXxEE=JTzcWY?6TUD8!WyFy1l?jyt*9~V6F2nCm4KR{6W&Hv);dY8K)f>q z+NytH75$6AiEDTYLL`jBAjgv;P4=r6t>M@eu6P1Dzd^KwT|Q4N{JIFXG53K9BYpj+ zN0!L*idCT4EDhUB-(tjj2#G`=hmQs*+RZL~5AkpTmE_773#KTTHCku3aI#{9Aj(={ zR$)i&T$st_K&Okb4S8Hq=NPV-3S@mB$`g0RpyuTpw@~)+liDjXVA|l@bYiAFvf zVwro-9=(gLYg#@$$wwiTSbN5??6z{g_FkF8WP0hA$&?vGu6pc+)GiBK%xwTuf9PnDys_PxNfl(pnPjbD$y;oZ+Mc$lLsLX8nVlG~B9 zMmTgM_c=tsqgM{lv5{O9w--5)%h;T57;3{|JB%BkP;@8b7&0LYsVYoZTi_OIlIFg8mqX6~D$G@7YL!d&G-Wej5{Ym=58iuh7TabpwR3EXPzVUsU+4F3teRLXVLr ztrYD%-|}N<%FD$y+t5q2YKKjjC=k%_5V#7e>db8U+Kha6m$ zHriue_l)s2l7{S!U-{yHE>lds_yS&k*@>P2w^vm!p`L z3`;rB$KV(S!xr+L9u>H#aQQ=CZ2jMv?0=Z3UKSP6t8g*F0Pc<+Wo9a-qy$(OyovYw z82FVQzg)eH4;bTgVas-bd7(05@jO1~~unzQUiuclq;8D3A9sZ~nO{Y~k z%kOU!=)%JZocS5|dOyXM9cJ(;HdJ;rB`Err8QhHk;dbk#uwyd$HUw7l zECt`1+sKRJ zzJ)F4%GZ>M@ayC(Je?NrQeNoj@e@b%IMe(r(+Cr1+W_W~mn-F->CytyscwToYxCqerX{gDdZ`aKyu^j zD>$0yMe7kbl_(Buy-rFmeYP*vg^Xh}#dF4sm!|2wIJ*(AHhL!?cQM$*fOyNezU3a) zG*{Or82mf~64N3k*3aUOhGz06UU`~!o3WN?E)53}_%c!Q63^X?u7bd76#QXb+Ra2eXq z;1Yu$Wl&=90S2!#Aj>4jGtJ3I9_BM1J9rdFzruj$Kg66}Y^)98LL*g8<43U?6uUSv z7!z|LL(v@K+szE9<`#*q2;77c7i=%MNkEzYaU@A4j-TXuz{fLBoH`>oLcpuHg5Qk8 zHi3~JKaoi(=Pc8Q$B^=D3uq)zGuBP9ea1B=4MKo*(1TEjEXM8r;p3I9|3rMxp;bX{b9M-ya)_N4g_OzdII`C@VWC2)zwI&jw;??r4vFrdMM4F6mFV!XHauGr@Ix=64)6pO_|1Hu0fyzvM5 diff --git a/utils/__pycache__/metrics.cpython-38.pyc b/utils/__pycache__/metrics.cpython-38.pyc deleted file mode 100644 index 4a090b1b45ea279a3c2220283538c39a5ca166c9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11600 zcmb_iX>c6bUGIK<%uH)EIy{m_vTXNyy&KQQmMmpAfN|{gdgILotZckaLJjK-t$96% zG}AMF-Lj7Kgj9JGHh?P&gd@9~VgwY0IYJds1yqGIf#O4e6dy?F3P%!PBM1e>hkV!% zgx~*l&uAnYNEMl>{`b3&|NGys-!nRDDfqo@@|jBayAWG6P4YMM8D2pB5kAZ3P@m^#_)$KOk|}*yeV zufOsM`u*;u+Mj=peqX<|N2Bi1R9KGYZ?v!)bt_Ug?2iRA6yWMQxexlq$Q&%i*1dXP@nCy9>vmK?Xjlgq# ze*)wBG_S(4s>tD;tn1#T3T?FiBbrI!k(;g}5hwvRdq<`1aXi4jI;U_pqjXuodRho7 
znp1Ywu7;c*YCw!OtK^kXk!hXMRgG)Atg8oX%?JUh*RC3}qmq*DYCu?OWsjC2(gX-K zd*8$_GUQ3pskLyhMKdx)d&A?A)^xpyd7VgaV|UR=rR`Om!1eG%X=HAkpJ35H&*R$N&Kuq6!HaK}}&wSLv!ioU+D3ji~7^dx3pa z=~^!-Ye3Sh(jCDVI@h=kO44f7BikKCnGueJ)CLHx0(o%>Hx8%e9J%gTcRVm(!eKqF zysX|l{Q~PwbSFa=POdYo%nBzk(~&UKOYJJ%e3<9jI`cms9-+7Rvs)kMdc69A(o5s$ z)b3QAPjb`Yba*68%_v8e?hN*l39}=~PVt`MCO5$u8gT`7lDeyh7zi-;j%~E$Z80pUu&PvWp`o_6C>;)~%g%gy!pmgWDM??K(Rjh`y zv^$v^?PY;KtU5dz&h276LT=}!C(wyj)u8lJT$;E=~op#@=1_6S{bSmOrUmts4g4V@ll?; z+pixRsvk@0$A{|2hw3Mi`pKkzGMw+>40lz2i)fAXG?;HaY*jL)E&PL$c zLEW{T+pchGuH6xCrQt&$pRsF=+pZ@oQRXz8w&U^rdb_eAZoB@1W#jjJdqY&*^L9N5 zI{x{^#abhP{9CBBTZ_V}I&QP$@J6%gEWT@X&8-AypLPShFw5C%vDlA?dDy-zY8x%r z3w${&4IOmO+x88yfu*(mMu0W>_Gz!<2;pp>vAwM&yDjK-t{9I&eIQt>=oL>!Qk4`9 zMyQH*%RYB@X)wT0A8eeLD@`B$o`>EFTqev~mTaJn*HomvX=HH3(9vqML3t z5aW=l2NW`iSyEv~NZif?)Igp$^AK(jsiFFkvTh1JWIx7k>CeZSR7&rn7O{nMk-8qK zx9_Q?+=kb8RT`06DQRB0NuX2GsvTs5P9-v99*8m+DMogr zu3^>40-`Kp)g?TMdGIETD5QCi9X|FGDE6-cC}UZ5UYk@cEw4VVPO547A6FU5a#~h> zOg@w9Idxo}(+cXenpI8pxN6g%QJ<`uSH*iV(gE9x43L5C^;u+~q+o5;$k=2xwMvQ@ zrUS~53uTQg(@;hxh1Sz46`~#eyD%uadI)0zN?oAFAftktyIR*k&cu@n4HyO5t`-`w z&rI(fC`-elGkEI5WYRL8;%Q2)lTv}{(M_R627OZ8m{F)jn3{!RfUTF5R5%8 zu;zFgPM;xdD5Ye&DLw*6Vk&kT;o7AWw=u(yfMlklu`7@j8)PqBbKt*iB`Ey-rPpI8 z>Pwd@(wV`m^rJLk`U)O@0RU7gIRqvg_+&29tOmT&}EPXK*U?Dl@>BoP7t} zjSI@HJcm1qU85gY`$jn#)a%5}@WGOD^qWi0KJ;D#JlhCCMAd>#3gqi zLjt2m+^?*q)^$jN6yySCT1yjz@%dVZ(DEp5A0yD$3^D?`2k*KFQ;xyoq4}@rJ3&OyIJn|EByA9!Z7FG!YE~noKdF2SQ z0J*zWwA)pCwY}B1=t)XH1&`bI-M}Bz*^Y2+cdLWZU>yf-d(|Za^7Piy8G9Sg)-vA9 zMZ4M-;L3f2e4q@@4^~GL@V*-Wlm0Rp^xwa7#(wL~A_h7z;2CMr1Cl9tU<7HY4SHWY zV?RN?WxKEEa4PFachC&>V`p2Op=M-yin9cySj<5Az}js4u6P?2E)kGcrsjCt#c{z=5Lx?2 z5ScMDOID5i8EJG!tX3&a4B8BeHLOu8D$;%df?j^4BF@t~hi55L%dzTuH_gC!<*>LN z?TB^-URimS`hEnh{PO^c6&q-Cs?O$Mc;(f{*f>nH3rL-m|5MCjIpoHn@A5Fw=9!6f zPTe_nSV|w5;ec~OF)Mnm;E|pK7>ZPj3Wr7>2VR7lLyMMGzeRl@-7&(>^x-LEN;xnW zoiwRDIoC20G*?fBh$JNd6OhCF3)KQbbP)Vd28l73q^cAw)enqQ$JY#?5 zmEOPq&c|RC9?a#<|@5cLL>*^-5Dobf79u0|jb~ 
zf}Td<+Fi!=8O4LNN4N@_3^7Rjxv>uY7?^9&JouYrqTJOXA5G{i=&K+@5*p#RG$gpb zWKGx$b3k1E40KfAXbRe9R`#dVT#}mLM`g-fn}pPdgy-{!vLe2Imtjt`mk&*=O zik8|>{`I3@8(EmRSo=W!Q_p;PC%9B(dxY;jV(=UH7=@E-eq+ZiWrIeo9+aETc6%e( zBN`O%1E|Hp?!8MzwPaP>UQqVoDZ6_W)J4`R1WYXs;Ofps(1t@=^V-7Qi@iu1c#oW* zlJOM6D*hYV!g5tw(8UZ~y{(ercwRdwW#p^8zUi0rN(;`bR&Bv4_HS)Kjg<7;t%g?` z*+O)DJ&`;k6e>?LplX_8A4EEZS4!lF$I+Rc@d07l*Q5=&ldtqi6@hlLN6E?GdjsPj zprG+iqc~Mc`L46t7G5lSr9@1@R3r~QcHcD*PO{c%lApV=O2qERB6^CDqd_4?=}kv? z&<1{#gGjE_%Y%H@Z?rlHi!_?hx}{Xzbpq_XG>Ts3DqQ~xt^q`3Q8o@UL>Wp4jiBj9 zDe|jvaG5_d(5JO_$^l*^G-F5~KBqe{>{{*oA=$!B} z{S~5Y2JH&nxiB9d!3emOD9}v{RvZ?>8I;V0v$6y$51>rYuCU9H(s1uFv&#?(*SU^c z7^<7b9YS0;70&M0o+ zxy?bPoy7kbYT)3_U@aK^;UU#^EPN=mK@BIvhY2qQ*h#oikYzG`JUjuc(fUuJ{8TuP zwVpbfw1e$pti;x_6y)S1OFag*F)H;9Z(XdR2pk);?G{R#P7Cozi0(SVh7Abe_GY9z z0pxMYYnN-n;l)QH9Y*q!gv*h>wY^y{>CZgdeosk%TEZ)U;%OS0p&8r8vx%pUXKNXG zJezpx%O!1dsa(?P1U8o`wOo#j)ul4J$=9-cVF1dZpRu|e*WnfCWZkl?lWtaINZ$%U zJet0;i3dBWESGdqZ!e2Ssn~Fuow`#pD`jXwXNdJ>Bs0C5xBy>9+t@7Db@egz6#kDQ zCYgtzn$+gi`ETlaK}#c7z@OrjXCSQ4LmVGRI*b3fDn5rX4oG1Y1p|ZBMg|c}$XSY8 z%IKiH6Gjd(fCBD=<0Bj#oT#HmtdL1e!N1Y4*!yX5uf<{y@ZpNP_$& zDW*uw(Mn|GLq-%Jq@WEFaV2d-h%44boD|SN^nw^#hmFLLLUe`VqJ3sZ{1keFA{w~% zLwtj7!uLl;nM!3~trW-=_v1(9KAoFbuJjTAUw-qq$7gefnwZ5kd3o39B4fATUrsFikX2W*}-m{G# zI4r(V^U2eaHZ}!p2%pOodU29FS>zEPq0$8eX@F}Pm)Tm{ zUfNpTj*(4GWT+j-<#iUMjYwe}Y3H7PJfg29xE2@hWI>? 
z7~9b)2zCmLixP@(tAt>AarDqhir=T^e@NgD2>cO&KPK>50-qu9Cjcc&Vnxc8Qij(3 zze-Q%6dgPnPD(n9yPjBH<~8vJlpG)cl7<60^iE_Thw!HlhA2vji*!l^G2SeEqsAo`8W%{qmG&eKka zXA}fONZyMtBE6IC-}aG*Q=uL7e`NewcqB>N=#3jEDFMmH=DPHqNrJL9O-7rIySSI- zCL+iB9X0ln#RZ;%5B(tz-;1fL)K^a(`%#jJsY1QKN>xAd|I|Q5Lwpf+cxQ6D?NPpLFLk~ zn2kd$_n7!W;BAlO!42^tKxjA}mpk#*_a3=_Viy4Z?pvyGvNO2PbvGLv)`}rL*hsGl zcN^pUJzBw>Azx1_O;{^%ly?gB6-5$Svme1A{Y(CPn(QLJN>ls;fj1L)4B#HC{X*w$ z))#&r*HT0VZ&(|+0Two0z29S{oU|8$cG>YNb$FHb8DIvw z+CVuOBt6UkMzC-(v)>=g!bkD#7g&MkUe&=O_)?85cU-=VgT-_{A=x8^OB!7gd8#)O zlQn`)S={Nx?2$qMYmi>cQQ(T_C3~cLW1#`&o%$pUCdroLV8dwyvM30p^d^RaP=zE8 z`8U8;%pGx1id<;%cLcsn;41{i2-FGuBfw5}nC!(5Q;|+zxQM@{CjUg>p9#E9;A;Ro z6KD6&%|1-KlS8z-PqC6%CD{_CNV1LX2?}nAe<47WEB=+h*9rU^fkVV<4H4^rP9rlR z{UY`F7Krvd+PyGDv>@4Wb)MxR!Osm5ErqTHwMEF8P*yaqiW4-2k diff --git a/utils/__pycache__/plots.cpython-38.pyc b/utils/__pycache__/plots.cpython-38.pyc deleted file mode 100644 index 2fd07e6873a65cd992fddac4160ffd11cb51ce6e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21672 zcmbV!3y@q_THby2b9!1cqtU$daNCk)jVzDewxso1k|mGVtUZ<`nf12U+v(}k)6+Bk z(z$n}na&-@tYk17#G1fT1Y^>}QUPy~4J-xOkj-WbNPyIa+K`2i8^O9l5%B7gx9D)xErDvyNAb)#LfNq+_*2J(*8R+Nq`TDQT0=r{&sL z&*b~NW!d}=$s5QI%5^BeQ?A4LVO-<2UGx{%Bmez`+IGar#na0z#3A|T53=Y-Lcfr zZ6|-MeC)QBKVIIacFs9!SnaxF=TD%#Oue%nU*gxZbrQ|d!%uiA&y zY4x0XNbN`JarJHLVRaCxC(66kht(l9dB?^$&Zy_rqv{Cq&#K4N)Ez5-P90UpaQ%RK zK^<2oQ1WDX;h#=q#b#sW^0AlE5%?Q*W}ArpMOT{rk-@8*mLz_r5vT?a#> zndMTs6-3EkUYCoitR~Y@`qGOt)6YFyxcuDJ$eVs~=6V!={>5jXedcPEF3p!qi-kEf zkA_TImv1apb-7+{1U|}Z)mCA)xm@<6Y_(A``Gpon7A32Gq3D;Y)hM;Ra%=v?^2+iG zlglee-r~p;bJd_x_I)J#{8F)2E>vas-qnjUA71|^K9MJn_WWwQyxxZih}?^nN{Lgk z_2c5^0)GBsfU<>cWSK35MRZk4c`Amf^~y07SBX0o7Co+#nC5g8ztpTXwO_K)u=l~1 zwAdqm{viNmuiD>b&0&iw_f9gjFQbRItb!E)E||2VEcEBiOb+Mc)6*x*TpoW3OEIr+q;C%)xX<9NfJeAPVw6)Ut}8(zhWbwHOw`yK0D zXU+aT=Z1&Zv+vpaxoq?0d9IYxZ^*CM6U&QLZStH3W9Rv*2;~@ZcIA* zA#{=VFnC|@M^ZmXZ~!1m_B!L8&k3;{&l$GcJMMefbcsmO`*2MBUBb^l0T2Qm0y_j% 
zLCP7i#;sL1bo5jRIzDazIs)BsYtFrfdqJ6X;@dhAP!mkL`XDZmTWtiIuRiI?$RoQN zxw93mbI3Cw&1M4k8n9&}zN3A#Uy2`oje7j{^H?ViundS&J_v!ioz|4EL zW+(4C`T-U^#p%8l}JOFwlSrSf(jG2y}P;&jn*3nKLDV=iT0<#Bvh zU~eo~YSqe`!#B^66d$Db0UX88=Q~AimR+2_j3QST7d}QMyp5H`Nm8Rw2q1 z3OAODHB+J|(O$EwD1&_fY_FGt`KFR5Y+1XF1=QSo#7{p7kaUuXwCC88yG}`>BqKtC zAErqQ1?@hMM1VE8ZLb1$>@`adhgKsQI6y&nx3y}ubAi{1b<0i#an@~kEDh~7=Odt7 zo)sh()B2}utdniE_XSDTu`RYU^-ayV^-)thh1zeK+5yB3*8Ql}$f2J;l>Gf}iPy=j z0aZRKziZYjtmC`_`~V&}GwuE>)mp82>!}Z(nL0IfY_eYy5uLbZQDq z;8%L79IUjkq5VKdDZ|F9&4%u0aq2pXgbV5{?ll)X%G8=gRZ!)*BFGLfY>Xx21Tyg( zvma#*Yim_&!s?zt6`z$YCuxt^j-9i!fJ|rY41W3nDbr3;6jn02^FrT!HI= z`?wHI(yEVqNbOerDvMN1?NI}25UDuCm7QuBsf604M!>g{YE+Hk3b79JwJ%CtY&4p{ zrKX;_m%6B{#oAO5%uPpbakfE=>H0mGN-22Y8EQOut&0`HyLt;wXY0)2(q=~}f>F5Idr z$XY3iNd-zt2FYJwg+zI|SO=}kry9+M5ykF9e4$g%p{~$>%O0@P_{Qx$g6Fw!rro&R z9@?194W1iW@44%5pq+lL2axeirVBk#;=A`gBv;(gf|Dd*z!b~Rf>%Nl@V|yThvW!X zV3V@%y0!(r`BmJf|W{cVz(8hRqC$Cadu}YxiLctnIUj;^ZsE1{k;`#DaYGnd-s@u#Hcmr za7>*oV^Vx#-x|h)YgWeUppDEo`umz`udpK9`iir!Eu-{8=?8B{eU%KLZ{|HGiW%%p zG?xM*vylx%2!cd82lfD>O)_hc`Y=-KE&w%|V7^*fr1FqYvMI^`q?4C@aE+>jo@2XF z*50AH9W-~Sz6ubfbh(86SwXFjFjIC}qEZgHfg&#`F9#x&L~iNk3A5P}&p$WwOyS~1 zeT^OTq3(6my2knmkQd%e_8B?Ng*p_)YQ@=dEsB+>USPD#LBS;LTly5c;5++Og>BtB zj^ByM0VUec(FmYC5~a{nK|1<&-1_GLEGG-W&cjX}z&<@}@B60XIR7^uf805NT^mPk z4!;xj7`T;V9TKkP0DAY@J2&_7M!$c+E-mZp=eO-v?o{sHOZfRtcem=X!0w1pv1WI? zHSCds%e}go)ZZZA03CN09es@K3POV&Bo>n3BMZ2u8dmYFi;B9GvOR|4sz_yi0WH|TFLQo6vNhP93zs^jai*h%#IEE z77xJ}J43uvG1N(-_c7&EY?ZldL$|nZWXuT@s(&2)g~?HBA1+7P8OQwg<~W5p#+-uU z;GH?QTi+dN&@%o1ATCZYpVk-=K(fUGHYRNnIVfz4ey$kgnqYBL>0I}7QCIwP8+Xh! 
z6%PgFh7aq+xm-(^Rkak9Rkx(s$d#5<@kmY>+_~JVjU&1FBe`3zZR;x!h9Kx-UEyG%&8pH>(gb+SsNQ z{IHLk7jzj{)b^yN(7)Y6|01W+iV{U%1Q8)>8avz4j}TB|g=R!GJ&IqK^@>tOzhIUw ziY-AIJp)Qt*7YSQ>mO!yx71pSVvVJGYeny3CnQo)LMGzm3H>tfJlcn%xamfR%t|da ze7tj+l(U4!x~V|xSu-VpAGkUtEHn{?VcHtDK7S5|EsJtZyOkjSZNbC-3@-giT9jkaCz#P{qO zSSiS%G?#NSW+uT|UNKB?mL)}k?R-LNe+KzJ7ZiHy7D~Ut=^{61wziEbfw~)`n#N7& zftow|NOyGGq^=lj8OjO4=bwPV;7PuLk=Q`*SRXXG`xY>QMe$jfE}M0OzY1ML?-%83 z9%=K2Rput-4fPK&ml^s7Kop1QJm1t>Gl$Ib{`Y0opTJL)W$I{Li*|N%9mv7IMP+u; zLO9wK5>T1ROKfnc0nv2G+l;2;8g-ep4?Hf;mC@giCKxTZiOBSVrDhXx3iV3_TsbV@ zLrhabzkk@o?9bt6>@&_Dd+S1N=m^`4EtG{e#!by6Al_i^*T7}Xs9gOz^7Y>$kf%P# z)cy@1l0;mWedxeHi`*WP3~Wx9DV&VW%;;xPV_JU*5Vn(7*~!NUKF$nhMlVYN>OCqM zlCM9_bXuRmMV}?0pkhoQx0vEq6C5>b%xTa+M6fwXNpYl_h{!?sS%9RI?oU6Eo=SV^ zL^392>pA4NcW6+NqpBQ0kUI*~&^E+o%M^Z>@biyh3Kh26BE?@p?T>j>B~}w)rU{5F zp5#L;?xrA2ht4}tU%{lobisHbyyHIcZVb#k6%g$$l^nN32!b7%j2?n{3>yK8^QsF4 zIuRs05a`MN(H>eq*tH7*y>B%WW;$sIIvLdJgB1XF04f|1^WEF_W$U#=tNmdn1OxB% zhiSQH*U$&l0h7B!?cm*w&jtgXK?Np`kqw3};7}L`-1%;T<>-AVOwn1QMyO0dk>9xn z+fFdt*%ge0DF_&&YY^Es-=s5!=U^mi48r+%`wxXDEuRAy-}0F*7lS2W3svmda_(l; zhsg^vu#(zcTNpW)w0l8pV>#YSIdo$3(2Y;GT(Wbws*va#IrLGkN$tj#nr!kyak*Mw zs^`FPX3IL)oXgFb(SYT)WJI(bn+FwdmJ6z?dp%r+zoG=@1_s>B`6$d8jQ6E>K7Lv7 zNN^g;Hb)uRg~*-~^`YHg=spN`=|-i;p%m)zFS0L3_e)#Y5^WJ%~ z=vxp2O64d%Z?0~0Zth;Ovu{RRdAC1ZZ6v8dl<+O1a`FhkVf;PIDc#hM;@b-H7` zQW?gHn8xeZq8+`$LQyR&`2q7G;6&iC{(SRRl<2W&53}xTQl7}GHs+crK?ify3Z7bP z!U96#1M5FB^#vyTxR3>ZX>P8%tiKaEliA2#j{3GBhyGz?i)kB!L_;oB6q_vu5I&Nn z`C_9{uKAdg);N|amcQuZ3 zQwN&CwmR@9bn7HgCkb5!y22b^<=?9O(xV;WalANH!HeWKcVzn z$c~b#TB!y!B#AiZ)|wSqoxa2(d&MkTe7USDaQMYn%++Hjd2hL@mh;{U;Qgx-XN42$ zYv{zMZfQAF5T_2{tA7J`>%Rj5Y5mGC0Z>vZd92d?aO#=q;wK6)lqjcW)$Q-L>dq=e z4slsPnY)vOvM1^u=x$7?EoeMUG?*4xv*~+?8}XXka&`%>Asr=QV+i=$w%mLkmwP9l znFApIfbs>pAJU@ky@dsbuml4K%tdsJ#SCYJuc>)!8vVy+N%Zm%WQv$SnU2XA#Mq?( z`#*B#{3zDcs;u*FW93G^ztPn7Vy$3`;Sl*PJk!jfAa&M^wt&vzR`Y+C!{YcXU~JNE zAKhMD=)KdXKV*A_Nn8I88c(|U47{_gl_|ek!8pxR|1C?B`rB~P5y8JFV13OZ{T~4$ 
zdp5F*W`O^JnKH`J?N8Yl25T5W%^q+wu_0UY1vKTiZ6X>u_6FB}12+{Y$`I3FI0Ijt z1^2|3!K`B=y$NqCRdu3!$A)T4Cp@;0Y%HvL;9|qYVVd)>NnDufPzG-AbmAyUz}N=< z9LB)?;&b5kYY?WuQ8Q8|W`j;@jV7wt%sQtTg3#l*7-5!jk4b@C(}A2SgCUi55$NVxWmuhBBt2IQRw3M^OUSUpOj|2#T79D4}kV-P8@~ z3Wn!7v<6wU7w+F(1E!Bc&@7n7e~2bNNj99AR9`^ga<={x+}}%g&EN~I$~KW{FA5X* z$u|1}ASa#{7RbkY42`3>(}_WJ>B-J4J7s8{bot7Nbo0ciD=tcv~`HP_*wR!T!4Q8n=D12fTYsm9Z1Z%Dvr{A*b&{; zz5w<}L5_JKM@9svIP{laq&4=&2t=xMh+ZaO3t=y?ci?VB?m(Bbt2@H}&JI|N5FP+Z z+aLCaec=v#Q*>O0S$xw>0o4HK+$XIYKNZH_vUXcR8hxd2J0JxE;Q+=?i%AOtSt^7; zHjMXym}u@WZrr7V!Ol?F7wi;bMJG@aV@u6B#>Wceq(23Hj>3sLY^e-KygC>Rqs^{x zQ1$P|J+4`x|Bkz{)gg#*83mg*-~Mg;wR;dJ|LV6LS{TJU?J)0U-m`-31P4?~4N`l9 z#o5t^!l8w+Fsp`O6Wc)u3-&n3U50k+g$dN(DQ;bemLgd07R#R}!t-ar;}Ckm`BLC? zu-|1C0atehdqVhhBtNA_%*I7;dsl}y@?)}@_fafe-4*U!h^rVj@5rLFIuedxTtBF2 zVea)Z!rH5&;jpQP^8XVKQ?OQJi#G25FdT-BF;#J>w}$DnMrR$yYA3pRDU;V2>aL*l z5DozUhPSoNcJdTz!Ih*Z~voSQrtYn&_7Ae#d*)UEPCKc`zIg_k_FE z9?tvQ?u`?`^3Njwq0W8~IFK;0tM8quzJZ;rk6|~9x@J7!Lbt>>Q)a;m!C`=H+9u5h zWWFcy^Ctk{+8?58KSVqTakA^k?`g#PIO@Pe{{Su`BEWLxG`i6hWD5E;zz`_qrqg~O zWl|I8<8N}P4G(|iZw`O%+23v7Ij?D#xemL*E9~c^1Y*U{GbQS%ZEPXGiH!9_0F&;z z@TvdB8U)bAX(z@7cMwJ9T^xjIo7`$pc2u ze*<=xK8kM1Qh}qImxkpir^7~A%Mt6bQU6JAYZ_))?~#rk`cP)DPLmQ zud^&Bv0EZ3z-ZX>Qp2H9g&-uMVTn==f(u1P@n-n?kJ$LX5s<&>KPKSZ>n6c|g8xAv zl11MZl;OJi^8l7hLtO?2IS)p=EW~*l<&y3p5#;0cL%2WS(qxy#_aS%KoQR|J&NEie+q3H$Xmi{knk%57`2unJA)-7NE zLuPqgsVKf|oL})3bM-2^3LEcoty+(~6+rz9>`##UpCC0Ek5bK6P_1)AL@6KYE`w?` zVnH}2Uw@g^iE0Kb1w8}^dJGDB)O2B^;b~GF(k~0vT3U1&xnX?UN!+Dzr}v;viB)(%_71_;N}!YAmT0N!$`Iun?fuJuEP|8D9wt{stz)$ zuK*W>HY3K)n}tRR!vcs0AuJCu1cw%D?X*$ s0f?00~hi2MfJ;yGrpbKutl$Tzi* zE2+H$8L%aitVRij?{JXc#TFtq6U+2Z6Z{5ir2TS$z=jsAI|irzHF788!i$XL?39)x zr?jH~1>5`z!F|}15$wTq6!4^JJl%vsI0M-G4>=>C0-zwc!t8JBNtACx8j=92sF(2b zIj-$Uv*4}*Rm9sFuXy03T*d^Kkc%#e0Q7(tFAlld4HBwP*^dE z2+>Y0=el|A6pUs@C29AIw|?U`YYa5K``+^YJgyERD@}L9~*8W#ACd@4=V-)E4nXE%=idK;gkQ8q|jtULh?9%!?vc0 zd7A;DAJzKs6Rb#M!-SU38V0doH(=8tlURknnF_5wLzA*GCXhrRngcN+)V5Y*QeyB$ 
zN6q&X;1&T!44r9FdbJC&|R`BBhNXp+6MzmAFy%<$X1Fegs3@q&X^%x z%;^k3v%!@l?8Yy{1kJobU@4+YVEj&?ZAy)+M3^xIlr=W*)C9C2=`&0&48dy;X?Km@ ze8lp@`t<sN;v(O-!po}c5JVm$xd1oj6! z^o;1gi_wJ%n8Gm^>ZCfQ#@9rGM&1Z!@H81K(xVD-{oVAKI>UED?zlQ@QqYFZsSikB zu6h#MU^3XPJ{Tq;y+8FX*f7Ypv#?U}0)UC4jZ2hR^2_j*Le6U_l4JWyWM6H^>ld$F zm^$4~o9nr$v!~m;f&#qpaK4rhLX$gG_m9E{KG`0IXx^AB({Wwi%*_UcnHR2Y-0KX+ z0V`LEfj1_CCyn^F#j+MQL4mx2A3h(wknfY>!$VtY7DSAK2!NQFy!X);pMMeKegb{z zGXxYvOuGx8y0FeYz5YD}MS^n#+@$N=gq45w6?~q*c%fVWayRkDGu_0OpS@tr@c-^Z zy`1MRwB5z2$Mm0~eLnLH?9-(x&M?6C4<%!B6mO&048@r7f9*UZBxbG;6L7iK>Ev1; zC!qM=9(wl5%eiZFQAe*ieJ3LXWUa7%QXnVQ^29ii+?iDGG3=yesCJx+3c7DK?I^G-KC=lqzl<^?=n0J$$}C#KedIa2!L(%-6`txp zkG2*VwTJJ7GXgSw0K&cpww-~Wei#g$ES#}Wjy?Pr$;9Dq8uxz@Pmqv9nE_OQ1?z@JP^h8Bt2kG6##Fhl1pQ=FDJ3#*L zfkKKs2Q*(40hTkCiIM`WG-(K+aiQ{YL)(Lth=w+<;OBR3aCFA#r5E}zlAWM~w9im* zOf$v`*Zxjcjfh~G2K9vdPmQ6cac~<5jGX~+8`$YYoP;|uAnj#*6vW9<_#xB;d=p}M zfZa}9h->zzAVG^gi|4G8!6*%?v}f7YN37GmODO&wXSVoV+XScUKa1AM897 z3@z+ehX4<#$-9m^ELK0rZm9bxt!3cS2R;s~Ah_wnor57_s23hVFM{%7XFBM+=C@ph1RdSBa&qZ2djoma3G<(!$(&FLd;r%iea5*Zwi@KSEEB5f%C#{Tk4*a#qM3Gkhz zdc(gSc}2K6w9xMpDANb9j|{>7${&N6LP-SGnT+KT)uR#fi2(O%u~Jy9LSXX<5&13| zw-DMX`+V`1-ySgo0w2-3OrJ`z&cSVpi;(yRF`lDibg*^z9p{be|xqA4cA1Fj6s~$@!87fDq}9<(m(+dty-Q=UHW|<_rPEAxG2>NcK6r!p^NoA`yrczgpkxcu>cP&JZ%V2 z7x4fAz#EddJpEVLK(v8Zxo~B6^3?~jafih1-V2YqWFg6<|zI&2@obr{Zj*!2+lm9+PO)u(N}%Kk3_OqYN>8=oCWZSrdr z_Z-Np&f@L;WP~Hlkm!T z@N6MwUM1)iQpugDBXRnD9T?*}S$cIC1Qe#>o-_eKh}d8B^xFso+OcNgAXcVg!+!&f zO0=v`Yyfm+BX80mIW`O8+SQW;WZw??H_( z1>aO$WyNJJxjfO?fK~{7ENgmL7=gbhA##KZ{pv@9K6r}aYKU4OIaAQb;2`j$82b7` zFy5I^yBKW5lcC6!=c43}l;Bi$XSc`&V``j-W#O>(3=br|gfzi6y^)#TCN|pamNqDT zpEhy0WcPGk_&6}E%EoODqhSfly73_?-=MoAxDmRWUFAUJIBSpcGJ*wGX+v>^Qwg35 z*dT_#gswMf?>-YAQ-C9Oi|>LvQ4Xr=Dr)O@@l2z?k^P9X7^F-bFhZL+SZ+S~bQ9k= zFNAE#Mdt6l@dGCE?$fvd)2g6BG>G^bP=aC&@m7@v4__N);U`f?zen)n0PXa3oRTwP zPx^l&<6CBi4>*b67WWPj1_F`wj@NbXAb{jO`~|^De`a%Tz$%0QY|hSU^zvK1+3_IE zw)ej8?DWoJw$oR7;os-}y2H}n!3mZz$bx-h<5m`oL;CHlLz>(Z?UZxHQ^lnUCry}< 
zuU}`opF?f^2Eji7ka#ADBX2U}&j=nN_#DA?f=$EI&+zv11VZ11eEuWe-XZu-g0~1r zQbpA=wn_4dO_mdR3(aZ+w)HE_CWVDygb9*q#=%EwW(h3%Ed55m%sMjYf6mk)fXJ@j zZ^PQ8wK6Z95*$X8fG%R}KjX|CL#E|qpeOCIoo~h^k{ON=I6)49pe8|FX(><2Ns=@O zGJpD$XnAjl9weOPK3cxntlcb6mHeCAmNAWvNoX(O=aXj}gBR@P@Oy;V_Sk_UKH;E- zOW}#fAQn8BJz>tmohOG4J=BZA#h>bWLL90vh?Lwx&S7U}oDwY$Jf%1<#JP))hQu6TOFn25=u%Tly~(J-<$z@vx#IdKy`W+#J(IVi36e~Hkv5Sh%|FrpzSO^x=!v7`wi_&Z8!J9oPL!3 zp8$X&A~LB6bGS$OQ^!x=bIxHf8=eTU#kAvy*Mw~c{qSuj$BgHyhDH!@@gEumYz6%a zJHAE0DL3-ohj~l75yfHSK;W!yu@TO+>1!U7}@(| zk;vdDWTJs4?`s=(#PSVt(Io?PnMwGU*u_1N3z+e^o3}#nxOCH4z%lqjF2IOgTX6eh(w>nm859%2C<_14U$sN zCSFMkVY!O2q+tu+XcbB;#1ajo_Y5LE($E-rzA8gG2`9m9^f4l5I!)OSh>NkFk8x|? z`00;WH)e5oe&=EuL;~o(ihu5DPtek$#wDzKBJ9VO2XQpk6@?nF_wzvgjoDZ51DT)| zV8r-+AiX(i_ihj!wO2@oW8^m>7W-z5k}TV6KjzXz0JbN?VJ(hN%vSr$SDj-d(>p*eQU*<>`5h#6+z!-i_CeDIT{7SenUBNst2AROzz17-~k?l%AJ0D2tu zez%?3`D~+xpl=DJy>*PfF4h`?eGp+CLN~q#W9Wy@)elhxx}I1*uTX5cZR`IDj~e`O zjST!=Sx^#aUU&{-^*~1$83@e{v}6Y?n-n@AiiFwDAndcq7kw%V4RMSqH4b_=g1E#i zgpvWw*07pjK4Qflzws5c+C>_1<13&z)1WvbLUDco6lV|=XDk?1yTyOCCmbZbp@>0i zF6hnR>w`jXra^C}L2tMcq7`C|#?8#Dy`VY!;H3ji6Izq&(i+)@o3uu_dOrS~geSEJ zr**lUy9SdXqLp%oo_%_?V+S{UO>yH5oL{9^*$EU1MWzup8H>fVDF z*B-mo)HRixzH+VIH%H@*FP<~(;ZsEYGC=kB@W)a@De?(Z#W%0{w z_dTY*o!~hAsHD)9CN0@?Gj-D&aBD+Fx-#*@{XAQ>0g{2H@22*Bi!9BR?9Tp`xK zWvn@0X1!k_5Ut@S*@rV%6)GSE2&ct-Q8Yx5i0r_Getn%yngHkw@2}tEqxLgXfan}U z-PtH?PS7CfxY0crhQ+T3vkr|sJiAAKOBV9P1SJX=(K@1+90HlppT`5*2xX(Sw{n&L z(ectZ&R;Jv=%V)_$w=UL2|xcZ>eD4djRSGv5FxOUBz6a)0_1*u6`S8|dT0h_(|0`O zF$ftbjhJy5YwoM@Bo0&G-zIe74Db*ndtZrcco>FgGGZt*3CKd~Hns!~95Ub;8wcUY zOku;{MijfGah|N3n=-k4UKrP3RUV8r_M5I89EbA)0>Z)2S?GfUKaFpnFnIj=3dslv z2V#fto@t&A1NjlLMp{EFg41CLPKPXl3{0>{3I>&d89C%+qBO)jAtE;>k){V;i~*yY zCRgp{Z>__Y*J1F*ziz>Ij6a4;%HQ}Dnv0O8v2s)EG~2QQ4I+56LrfatT@p z|NZV5(R%_9VRT3msC^2GG~J1pMUftIaMI<=cw$e5u=THTM6UifxahADknW)PCnSGH z|94z@8kqND4MOEtdujX;mHyl8&e5O2#gun#;JEMZMOL4@6eS7;)hrbXnyV=hsFBn3 
zC8Uqb70kmBZJf=SC0)bKlt?x54@wfNYLu42O#?Uno{bV;6gO8x%8`5Jx#x{>I?l0j=i-1l|Km_|SL+lr`iO9B_HH1@kDT^*8%=_OXI)hL@CSfGM)Nmu?0=_I^V@cM5ZW?qZj4IO+CV# zAL58_5bWXYrwE=Qm?rpDg5MzcU4lO$NK0jcNrGbp;zbtQ^e1`yV+5ZiAYl`v*g`%x zX=w6Eb71Z%>Q4T30MDbbn_)-zis74nYV)Oc`xtCE8JB0)({6Ss?q&zFH-`44Q#e7J dcI?cB^k8x@*`I#Qvxgs>*qh#+o=89N{{g%u@)Q67 diff --git a/utils/yolov5/__init__.py b/utils/yolov5/__init__.py new file mode 100644 index 0000000..8403a61 --- /dev/null +++ b/utils/yolov5/__init__.py @@ -0,0 +1,71 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + +import contextlib +import platform +import threading + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +class TryExcept(contextlib.ContextDecorator): + # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager + def __init__(self, msg=''): + self.msg = msg + + def __enter__(self): + pass + + def __exit__(self, exc_type, value, traceback): + if value: + print(emojis(f'{self.msg}{value}')) + return True + + +def threaded(func): + # Multi-threads a target function and returns thread. 
Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_font, check_requirements, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + check_font() + + import psutil + from IPython import display # to display images and clear console output + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/yolov5/__pycache__/__init__.cpython-38.pyc similarity index 81% rename from utils/__pycache__/__init__.cpython-38.pyc rename to utils/yolov5/__pycache__/__init__.cpython-38.pyc index cdea27d3ba5164e2d584e9fa4441860047b57834..b77defa0d1e80ea679b0b77f6a15e92fd517610c 100644 GIT binary patch delta 133 zcmew>_)Bnu7Nd>3enx(7s(yMwVsS}gj(%cFN@_}caY=qrVtT56ab|8oPO7d)W_m_y zkw=hUYKeYnNoG#5er0}6ewpd!OvYQR@=?5CF}?KEywsw^oFWYd28JR{5TOMmq9!M? 
d>oG=6p2%LyXf~OjBZbj!atTKOkUYv^0RVhRE35zj delta 116 zcmew*_*Za)7NfnFenx(7s(yMwVsS}gj=o-UVsb{Reo1~&a)y3JX_9_vPDxQ>PGw1E za&dfReolUwX?!kFX=;&vX-Q^I@#ZwfTda(?COfjr0?BxGJs{b~Udw1XnTI2V(Q$Gi PM*xuAz$`xbCWj&b{(ULy diff --git a/utils/__pycache__/dataloaders.cpython-38.pyc b/utils/yolov5/__pycache__/dataloaders.cpython-38.pyc similarity index 63% rename from utils/__pycache__/dataloaders.cpython-38.pyc rename to utils/yolov5/__pycache__/dataloaders.cpython-38.pyc index 152bf1a2e5b145c1dfc395d12d138e6f28a33501..fba6195aed60e418eee32cb93e4970627fac9b08 100644 GIT binary patch delta 4295 zcmZu!dvKK172mrXHz6r2|`j}H7rZ+mt=AC;O-f=hG(tfQjF7Xp2@Rj^pnif7}c|*dXv&D=NIuDnx9@bqvNK#^qv-_ zcz5gc`Waobt|#BqMw-%Q)^YP)`eRI-Wx8W*&(HDyEYIb6uE*y38+QZ{IUaOQMVYgDjm*?sF7+=Bb(B-#o==FRh z>RVB7$kXvvyrj>-SMxPEwvDglE+pHIWNzMwE*jB=2el^Nj9Rm@c=sKk-4fhl#Hv2-p0Q^(`>Mhq8ZjE~0@B_WE2H*bE4!y=&HVl8|S-=hVp?Fp9{ONZpd+@watT#F}k6I3U@q=muf ziSNefSCXoXO$4n~6Ou2pq#I5|8GMkke_)y-`1V(Jb{Iayh0 zI;nR!5b%W*Z{LtpglKpd3hN?K3DpbtYu%|oUicZ4Q40q=m+oa1keHFLML=~%2uGjI zD2o~WOUA*pxwJe%J{&NR`3^$YWaY#_cV_uYD#*V+9On-yLI^^iCNan0fDf}eG>nmK ztM2UoQWDqXtS%*rb4=``hh8khN0^Oi8HmscGdZJ$1Qp*W@jMBQmk4HsX#-mXlcrzB ztaFQbf3CEY`1$KoubDSRccYA0(xEwTl-0w1nhO=?EL zCybTB*NZJNrb`Q7jL|!R0mJilU^-sG;wlj=~vS!Sb5*=ojFCOluWU9)NjVH+UZ!EcjdC6PJdU(P+Vxwt?hG8c0`9B{Dq-0Y#P zTAouw3$ez@RwO#fKsJpIdaI?WH1Qce!UkU;ATlUIGmI_gksXw@4)}O^U$NXly||a` z77}dErA&?khE}YSb!<(5P374Znasxs{u>f!NJ!+IaKN<{f5uE)s#{dJisn;^m;ySd z59@m5+MaRnV~hE0zT{ZBAtEZ*!Td3Cu!ari2qCe6)?i4zIVJ%aQpoL{eN&{HKV-p^mqw z#UabFj?P>SH_eTb=pm>Zu6I zBrj%hiP+2F+g<&}we(sD*SiW>5#)3qMA&z_E3}@}KXm_uPWA&^OVjLx-XR6KGsEj! 
z4{5Qmaog$0kxvkGIK`a7QB=bTzMywxIOrck)70&ST2r-d`^vb;K(uD>>+we$3U$28 zN;a5xB+J^uDk-2v(4oRE;G*|aee;f;`f44x`t#uOjzhE2ZuL$l$UV75 zxZoSFi8)l$&I@rF@>j@@&>T;Otp75&$_vtDZ`njnZYR+QDaw}l{m}^{%0am}@hbV( zEG-;2;^Sjd?u%JmLEnj7S*_DVdtq+bq&nfvzNTCmh7^*Jg<0=xXyyGAv+=yxKa(Qk zF^b<3iwk-ND5$*y8(E>e(E4ato>jQvM$$Ue_x(0K<~cR+8dllw0RSJuO(aAnD8wdxlpQ$;&%oNEww4EgVN}Lwh9EH;0et<0-dx ztVNG-&0YX1u~E2MG+Yor%{9~Dmn5V|BZcB5VTVV8Eo?Eo6I{wFkB3ZH#paM*8;QC( zw9a57Zyi>H_y-xiO+Kcq_e@z~=kTvzp@vvb6?Kbl{#6`hnbmZC74B|KI<`1ADS%mgi4H z<9L6v3`M*`ft0}y$1Noi8rc={eFepz3_4vf4quEqJ;A;n|JA&T5}ia4v4VFGzF@SG zZ6zF?sAlMCCZ_89KwE83Ers@mIH51<+5z}bgB+VEcdU`MyL+nkH1#v$G{p^ z13x>GGkanE^~e_yT(TbBIMnwZ$Ye%YGqv#LgH4#?`iI)G)`rhtF;k<`{Zd&K#@hMpP#%_AA{<8{FB*VgpE&FSlZlKsV4t+*!yH} zjKN1w-mRgHe9x%<(;2ZJU{hymSutEZQ-U|<-_Mlf$WN{bDiZqcihO0IV8NK4a>cOs zKDBG3Y@yUsTUz9=vKDK+aMEIGF*jW;krqqyT~@C+OLWa7-X*bu#1aDMQPyp0`qL?F zfhWFS8Bzp(#nw!>WQigKd6GiIqIbZ1cgPszi=_6d2=X}O-bE_dq| W(NBzR46d-ErT{3;zS1m*N@# delta 4490 zcmZu!3s98T72f+VEGQzdf~*U;EW*OVg19V*5Rhegh*3brs2c@0+yz#b*T0(}#spt6 z$ylTD7}NNK#7@UF+Qz@NO`SfHNm?_RG)*%dJDX%u`&Dh#oQB&CkJZ=YrVBh$LD+R z;wj!bp6adVY2KCo2ESp_&5eA)lyrc38>92|=-lIP68jl^A?;5#Pw9B(PRZNix9(`2 zTs5U@(rwH3w$aSrwn)cKJEdbxzQAkHt>CVM6FTnl+YoT0zl2w2>G~M2;>%IxJzvkO`3jtG z!g)=Wj@NQ)pPt{v>u_x|ujeZfYzu-la5t)OqY5w18o38&9{=JU_wLw==-YT3Z{e*d z)6Jy~I{WI~mbp#7urCOIm#!&S^goac%P>9v_P~Il5-u2?Dxs+?h!aPV#D0IcXTTR4 zaN#VJh(sqDnUSS4xdC{>p82;JF3CjfiE=%Bny@WN1EJiL_%gG3XgA@Ogr8e_B5_3G zk;rZuCD5o5>gK+vm4&b7z8f?DAZe+gkyu?y`n-jia)c_d-XCl3XN z`sG|0pYLWRaBY6Kp^!L>z><=~98j0CM+>XGnsS?oD9BE@n$}ZPjLdeu{2<93B_boa z$mFMKtnm%%jA2#`SB!;PIQWZkZ|V%!DH3u*c){v7`$LS-Ks z5Sg)<(vCbH)7>lsj+$PJv7~3w9jHbm#TOR1l8Y`&Ap*>DG9MJ~` z%jLUqAtD6^wMyZ3*61BR!eBNqkHT`|C1!&JOMZ+kRhI9^%s;mPtA>NQxyBMw?jSAk zfe;VKaxGlVJuf+Q@KSkJg8WC?E{Abzc8vU_^%?D4Y0CREV;(5VUxms?^9Pv;KFDvt z0?IA0Y8hco!JD{ewhg3`&KR$b+kS{!t%dC|0+WS2J6a7lNn&;!A-AYXq}DzM>a#8I zU{T7Qu{l+A9%EBuPts>oGVDywB&^xl1fQcBz4)$U8(X5pmo8w;h2@eCPgKu=sVkTmOkCz@W)X>lfq4I~#D`}X9CtjYUk*I_OnmU!FWey5i 
zwR3_Q;U~^58mmQF)qu5vn?-GWLb9_RSI$>30+@1Di(0m`NMaz=;|m5J@ZaGX0aU>M zT%&9mj8!`a8c~z8r^>hS z6VdyELD@)tnc!d*R}&}>lG-Lxh;6nruzUGkD+wDL;}oGkOXN8sPZAL! zeTqiciJT=O44)xR_+t5uDq-JmX`5``9ak3K6~+!yODpq9Gv)lE6p=zc{J&2Hae}m> z4%=VDifAt>Cr&5$R(#1^%9fh946GjP?gP_dyW+lpWxlB~*CI|&#Wb$Qq-bY2*?1K> z*Lwc6gbc+L=|_e~&>pqj?8HJmM3xCJoUp#B&Jb+|RCcKoabRiAEsBaJ1_pcj0zrQ$ z+OA|P>79ur2B2v*=C`3aCq)G$XiBOH{@dghm5&W?DM_uFE&6)%>S7YQh*cEw_YeAq z!l663rt!>(mjh2&=0I;tT?Y9g7ZIS)s;Y(`wH(9a-_qKEedf{D5{$0WdM|5$lvSD5 z0+L8Y5}DiRUf z;tLPRrSL}E#Z1zOmyn3$3bI4oK2GKE>eM(yIn&-@)?kx@F(TWDwE^NgN|Qt{3ew^w zVEaw5u_Lw#UG8`mubOq88QRvQjCQu_@c_QG;Q(6+bzPlFZbEpOhz-8o)t_Ha!^KD< zM%g#ShX;E`{Bmf--y5bgDlSb6EZMjh#V>3uiNTI+{3#vor#9JAO9`c05VF_=Z*1DH z<%P|g&m@ZzmMBxzV^r7{6Lr}w7A>FBw529aU5Hla%mVlg1+UTU< z*%vMBX~I28WC@XIj=Q_JvvT-F_c?>=DfISCT17VEXN%E2rph{%{OwocjN)L)=`;+F#lzNP*|5oz#jizVZVHhOw?E(i5v0p zU4o9s*l+JUVHVXpNouanct+C-xY6e^3r__RfGom#r@eRe@0<4fTK|+y_-&A1ClnF0 zU(@J15v}w42i7o)gB0}9mN>&eqZv(3?ga_ zY_t=ucIS~NZ>m?x3il0;`0Y6JetOk<`uT&GgF?O$=|Q_>*;7Ct>Z$Ga4U-u}&L^|#RKrmF+?;rBZzM!0jTu?FDk`YN$H^Rem??8_j u84iMbIQb>K08)q~H)^An@0GX#I=fVLyJ(W8pwR z1yTWJDZ5~$EVXK`BsvFl}p{LlJ>a|#RsIaQIDz!@P!-c-$>eOnvj}-cgYf@|EK3W(ku1&3# z`&ePHxGuF$?mG(WiyKlK)DsD_|lzY1{oEnz;Zc8QQx+%3;u3J)D<$8PScDZg# zZIkOAsXJ7sN))yiM^Yoj9jP5wAh$O6rUh$p=TacGE4OQURCTTkq{ec)bCJb8s!Me* z*$YHE~G4FO4gf9?T^?Nh7yyto%R z`e*FaoBjATE8_R5fz(@aw@*K))~Z3ZPOX1Ah;}9Ad4n37ekgCLjW65i^KfqS^#17s z>4$T}iwBjpWFg%HxqBC^Tx9x?x@}(|7hODz+LAN&E0+ErYEvoF@_zHafcmQ1s&2n* zrH-htsXNqm+&!X3)Q-!6)KT>&wG-E4xwooapR&}b8oL}yy-oe0+N177y5q{u?VGUG zUFvQ;oyeV72%z@R;<&nJ$x5B{YrYpX+xlaU8c*?qu%t^jUSE zy8m)u0sVW$KL4!xx_YzPhteL+J-QG`J@!aIy+u8EIiMcQS<{a%1XE8S<{`BoG5h_P zcOd3~dKfVe`!OkXP#r?MpHzR8+mL%|T|ga1>yM~ME{9W3t8b{I>KJNxr}|^{Hgz0# zX>~%4BmFb#Pt+;(cEn_IZ|to=j8sZdKC9r^_Y4bDV2Icy#v==ZVc&C>Pftt z$c-!na;ePLNx2n}@%S1>` zsXTL}rq%zcW>f*GW>is?P)kAm8+Bf3#1z%Hl&fZOS5kkfE~tyRE9c&#c4HPUsX07V z)Ss!U`U%{fS2Z<{+*LtWn zRPR>r!S#~*bM=1p3f|1A52z0!rmFrzeOUc8Vtzt>D3@HUsShn#DDfleqllYVA5$Mk zs%OaWz2dKGuitN%;=ocej(EvRp+Ur?XM-DUL| 
z^;x8ULH#@Ri|UsU^Dgxr^~>s?;qFEC*Xmc)uj1~C`Ze|INcEEX4fQ!(->v>eeO`S5 zZ{DN+z51g1ZQQ*#S5^OF!NS~pN&OC}eMtQ)^?OM5VfFj!4{&`Y_o3W}bFTygb|5#L{t$j2#_yHf+ZF<^g!NHnm14R0 z;!iIHrazMUsIt!lM!r|=i9eDn<#eWyoOSaBC%#M&JU(K1F_p_zvgOL0+-KaJn=j^Y zAJMr=F5}`Zes)UdGU`;hT)6v^DdnV^cY`unMbWD_WG2^;9y-bXjT0Q>giNjBvI-Ndo;ED9vqo)ra zICRE~sEodtFL~CURL{v1r_xB%co)cWJnDbEXWI_q=WIpb25Nx?Yd+vwwZNji6j&6D zN4U(G1#5;g865Y*87G_1dy)KPsjPD&Aus5-+6xwPC5&zRYmD5L0kLTPmStnz25HJUcdmfUS2S%<{JNkME={oK3VMFm265qy?#tgC~z4csTvY zsYkJT?Xu%VbEOM;T`slM$3fLnDMut*E@aLJucn-;2hsRUFQ;#32_KeMBCMOHIv5RGmr8$}EdE0?U9yxwE{pjiA>4&jhssmH5 zTXFV|ja|HWanvLp&6bNJK|P9!^cVs!IGJ;A+WZbQ-{d)tboKV%iF<5ifB|<_m)d&5 zmCG@co;-sa7sChY!CDIFUc#H|-L9>I3)cHLV>Jg=NQEzBZ6CrUFIbNQYTXlfd*F27 z=K_xh&dA*pxGN2W0&Zv~u2UAUwH0_rKw&bT2&gEgBv!rqAi+&CKaq5&a!KshY|e3# zdbZ>w^Cc&*a>-0mm9sNBoy?X?ZU#$0k5)U!MolE9?`(C|81s!exx&O)c2bvTD{bqO z>mi7r!`}dcTHxvKc}oRqSQn3>N#`+HTpu7grKWKiPOgyA-GDHxQWiyXWpwQ0#<^6{ z4?fO9xmiN!e{~2$Zps+lS14yQ1?R!hM$#kWBVo;rg6(`M@3;V#>2m{% zfI3){_7Y~)Y*A%x2+Z5nt!}7hD^Qj1k64&FGn-EYYW8Ay3B&YwKn2gBpKb)R`9xzj zLo%C~@saR&HR={CqwXcwvo4-JG7^<#>P0eH&Z-xBJbm(QG6MR3w8o3fmN2+8dM_RT z!-FcX0sC`+dT&O|$`MFl1TN~lo6DRlZ4n zVDbm?;06`~PQtYoP>pL(2d6`z5_{)u-YN+E>td4!9dwZM`!AFhQL6SWX?Bd%*HFdwN!7Q1oZqY%q@Jo_n-wzn2xoHC=Q zyU;StseO>ssJS2IvD$h2_98|Q5PifMS*5v$H35+qEtDrG0Xn>pE|*;|HkBzUEFQ<} z(m==;a!1Xx7tEJje=-0ruxq@~vGF4(z3}1Fr%#^tIuLmrCFQgi171F!yO1k*U1wa- z2gQ033A{K`$CO@2^EM8Y<0l%4dZALLnDe3+a{64^$$8cknwBZec;SK+8bpJ=sNc+$ zQ{0Iv(teMo(LLF%T;>VPYOghj8Ri`KBGm6-_ag?4dC|v#*~=H5wtdgx;}($RPYg!9 zM2tjybj=#)2A?Qtz9B-rw6Zm^eT7W%oXR{nj}-qmegXXq0iVJUM>k*dwHF+7=!cU1Q-XJjnLLLYTK}wjOj5a$%MBkvUWVsh6nVD*vnjA*rFpH3DKiB zBCZ-5!`?m%_!JZ9l`hYWbQm;r46hAf3y~DjdssxQQpmWZtbp9-95+{#UFn63GuY+; zga??2=r?^K11d+BWCbLs7tBsfuAGXVStqA~i`juR@;`^nPA`Ih9S01IV~GsnU&7L5 z#AW%S^^dDyKYk9eW-b5`L;&K4PL4Rpa37ID01;79kl|q!QysWQR2*ZO@S>*-75P16 zmcOJ6B-24!$+ zNHPsVaoWBQRSogPhs==!LY)W|F$_I-R)^$k=izU(K1; zSNX1;dXN`CgLsF;Kme17VNBGAkuw4QO`pdrfK$vm0ctw!#nb6xSd$XmhUhgz)JyqV^di*n_CO?3qT_7Ld|spRv9V1r~_W9q#UoOUV)E7^$xy{ 
z)$8|{l)i#k{VoPCGH4E7NLgF=Fk!(e|Qe#zDcjxdtQKXB>(3V#0BCV=3ps~SD1gPmK0m76p@sDy-LSuI zpF>XBX8|Ehy8(1sdL%Gu97Fd!Ygu4$nStzNj#pFDfsJU0*I-0Jf8Lo{B37%Teq@Z&7DVL zum0CY_>oF5JP$Y?pVl_W*f7yvI15RrHjTX1>U<4_)c4l6A_V!j{M3jJ}^pg+oh z=zZl-Jcqci;8$0e~|5d<+Ccp<16cLTTxB9cF$ z%wG&ZlnI=0IpzkgfUc$>0J|$V-j&Or)MFUt? zgY~Zj>h&*KYcXv;6B}!vdv=%@M&_RQJI}cvc*>q91BBTJCyn{8*&mV_Z!Y_5L4Q(b zJnMocs59^U303*5nb2?IjX$9g3^7D7!xar)H_bU{nbSrF!^_xvlh^H?$+mS`Zz~Bo zCy4+p9jvz$0|Gh7Cj(c5tpcO#)?19MNPimXo2?h|p}rS!oNQk_6bV!}-n4CXQPRjN zFN)<`0my|EepYKh6+!)8WFk%K)=OadOH95rQI;okQ)pnv3qvwebc}T6W2n{Z)H!fq z`3t$UTZUNv{PX)h$j#~}IyJ%-H| znl9%{VD^3i#ilx(sq)1%MO5|M*-}=g@k*pNo|T3G3sI@y1aSWCjF+t=b7-coISOCL zYoE!FTm4||`>X+oa)yN6-)iYE;r$J`hZA*>880C)pq=Xvq8Jfv$$bzzM^>d9oDR3$ zM@&4{z3WEt7UG=+8~4G*4ns325S7#M>CSXQ1>LUcZWY=W!0N|Nlcz9Ln4}>h0? z7|7%hG73Q1_mm9?32<@+5?vi~Q5-N62-(ZAE4C3&6UX=p}gE@?+Euy!Y$K2h3|j>-EPh^-mbc^nZhQvIcrq6!jnQrj7~;mv1_#vT+S3 zM>#Z*lp!H78Af1o^LgidaQx1&4z@$)eb@)j*!h&VOb2H=3_6((v>+5nU7{*Ck(q_q z?w66(>o}DmM};}?!ntC_ox3Uc<{18GR%hsi2zr|q1=IUKp$3L(c>^{X^nmEO?MsFBiCXnIXi5m#`fxv*atg8^| ztKiE(xqx}wApx~0H_AMc^r;ROnI=WGvXkBHBoGwD{b{Is z{tHU+yV!5(Z{z8P{)G`K{o9NiLqQ>=2mr8oF$A@EcnLx!_H!C4CPU;$HJ6STER`>M zT@$)oWFtW$7AwBQ68DgsqY?7P3#rVUgH6U>=^BF-lM^HAOG6U>-|^CK@sOpzi>F(+ zcoc8hV#s^hR!EP)RAcF*rD4tyTkKz>ATl^7_V{&c*GNp7ufM~FL>T-vgTF!Gh323$ zg*+8nF$hi3?0Ty;ldNcUhqM}M_Z%=~z1bM(zeUQS zgWl*2mKg_MA92GXP~~%k+71JFO&TsUA8qCXfJ8Uc$ysWnyf`=%Rcg?QtaaL60eS(R zq7a^nvW%kwfsdPZD0ri6ZN&fx6z*o%If{hBF_6tND9qAADb}g6Y6u%*hcAXRWB+$7 zS0>=^Szm-IIjF3H&TLWtC1U&u@a*DBkpOHAxN2Bcl#S>{YHF@x_}7i!kgfj_F%X0|FX>m27DfQ!Cr?B zOLxQQ?(s%8ymkK8lB9$)hL4Gu7k;HGO2P-R_D*jD@J+{IF@M1uAbx=vD>hyNtpL6- z^}}c|;KY$HVA~*fH&a(hFmk<*xXD*3`KB3OqzKTN({1E$ClWJ%5I?7a(i8|901SH` zKwkJ0VJWGjhZ+g?1`FV-F|79kNJ7Z77V4q306-Yx5cHqp5*ulW4NpU)>q3>JI#e9P z9>cJA0(FFiph~E&%duLR`+7d&cGRGN(jTdXq0x*hFrBJ<5X9Adv^1o8=2=Q_4ZuvT zl6Hkmi6GOXs*mwNOC7L8>!+n5;?^{4T5^d90==*T31G#TgN#yCC3H5V%ZK}l)Zf5(` zma8521?EGgUZmYBF}Gh0GKS2r(df!<+BRNYT{p_W)K1Nwqd~@4wQpw<%C6*Wg)r7h 
zf-+84V!Vl`hltN$OA3<@y{)fpBGEQNq+QAOLfBSA~zfID6&b&VZ(R;X%StR8s)vbmhLCauw1aha9plt|KG}mpavt^-Ids_Me z9dh!zm8`}YHvwW|Dk%^Ojrp(0=tYUQp*yJFW-?va1%k@qj?SDrr*ju}>D)x}C``5L zBXnUGt4>z!(b4PHST%-}+43y3-~>Wm1S}Es<}yXe%50%f?O?gQOU@>d;ya&M{D&{R zKWS{xl6fbIx|8LJB&RodZZ27k!XBkq%)7$kLc5xL6s>ut+WSlr%itLnmIaW(Do8w& zbj$w27_IisR`@8zkQ-0Vau}1}`P2&|J^HtBYrxKZy!&qq2t@Up5CB9$U+d}y@X-%Q zEHvM_OVr)zd_^=L`t!{41q36Vn&{q(K;a9$loy=U1yF9zq%<09g$2S03f6C!LWf2{ zTv(}u*si4gTvjO+;=q_S07DR;{_kBm?c2C22>${hqo^OU2JH3V>0?$ukli3aUca>! zT;Z^_6C9%MMf&UEyGHLD-QBic5siiAV?J_+#-NOXWpaohW zNfV`bp{a~k(4l+<4=U9?jFUz z>oK3*PG;H2`X5m>r{*AjP6>f9vuJV+j4eSXcoov7L6T3{c6JGp2wWlaqtxw_xCa=5 z0S@j6oWfV5^U+#JKT(S~Pl)A0l$Hz6TeYyBM%*+MGQnDu(vBLf+am3fxB8wJnu=ox zCbpN99Jnr+AFQOMz++ZsCR1IqspXLJ?p?A34}%=4+i8`0 zrR+G$ezq2vgvF_f%>!G`$7rMX^wIf_YB%zB)_|)?H=chtsK(fI0>7>aQjdRP$SL5` zxM0gp!GO_POeKH`WA8wYC-AF9fpNc4qkaE3+-?bbV9CU`WJPR=q-lAeY+*`hZjd1l`2d3MUpocdTAABISO!e*rRL)G=NQmoRd~<&v6o z!J4+(1_Yb0mG^@fcU!GnAZA~CH34dbEtJgyk@)@S_nMhFO8vSOnD4X#)j_-;keusl zooEMy=f(m~;r+9nPrz!C!g8SiHjH~gELmEw7E~2>RgM) zv)DIIz``S^ZqBm{TIM1Yv^*?aA@nITQIS~z7Jxe>A!D~7Y9H7^1PgI-ej*RuIX5ea z(59rOfvun7k|wMowua7Qn}KqzEpOSiO^i*mA3>Mbt<6O;t(6jOm2)+S zR)OFXCPzicej_lufC9i6S^g}u3M}klS1nC;a8_qwx##Q9h7oTBetkEtn=^RnkU0$m z$@I}WZwMigd;}<;9Ig#6X8#PprsAYiHJyOi z0U|)E@kRb32z895uw!0#dp9oNg)ab!gFXapD2qnljrc6mH~#P}% z-i{Ga1u>uY^ ztAz19*iiyz+B?C-GuhSx0LN3ujwg>50a-`u-QWXz0t?PLCw^okA-3g)@Y;>qV4`{9 zPKdvx-}*Jigduk0af*wXy!*D?oVN-h4_BAx4ipN$X|4fM%@t(nH*tl?T9H83ihugZ z6#YC`$^eR|xTG8Z`UJqx_6p#{(r?E5>v3ZO>_BXN&8h}*X#*vM=!Ds_h)9;#0wA9) zJq?h8^EUt~kSl;z5*a6f%LMG-^^RJg1duWb8tdu%p`-_OvH(#njd4N8fh>pQV|NWI zrJ0Dc+l>j)4*G?HTv9Pyj^}p^wP24|2a>dZuee+om<1|dxm2J~m7SJEqcj<67>3Sb zg9-x(C~UN};X?T$kozVkBIOIq**Ek4A!N`XT>`oZ!EZj6rl1cXslV$;%tl1VisL;8 z5P!|ChDYZz#X>bSI#(>TNu0<>aIwkwas3e#(`KVCV8=N!v%e|f1X) z*chL)I)wYS8aZQxyHevhysg)W?F)?&!F*GU?tqzcw$YT8)HKiB=&ylFgyotGS=J)9 zGK?W&RP3tYI^>=1Xw40^FE_j}T|fui_5dALpmbW|BO(V9aG+vW4IoKOQyv6LQ~(UF z3)W%}q|{&V+tdq5^(veY2syt8+O~rwTov%tXk#30{12+rT|M28)$?7XN^rgK{UnUJ 
zRTtLW_Ia$h#Wn80^nh!*plLS*RQCq-aRcg8J*xL*0Z-@uvpdL@399;Tp`-xR7Fov&2DA(FMR*m}dt?Sl9Lj6@ELX$v}kra9Z z{8+|KUQZkaL2q&8q#+!L_666G#14Ijx(jA{(bey zgN!_JoI8S0s=?{)OgiOcHdA6Gl-kL9I!I17}_;3~?(As1aJ5*5ZaNOm^k z&<%36ZDi6hjg9;a9=MTZ`(m+A^F#x-4|QSAQ#fKIR{&7F;g%uLU4%l33)aX-V4-BT zU}QQv9jgT)fX2qESt?8svMgiRWZYI4&b9~|MM9f_2;X(a^bs^ulgZad5kS5ftLXA1 zgaXb;+<@MA^DKv%AQ^VSTwg#h#7;w>WC^DjBoW8Mj1y4A>Bku(AdthnVsIOf5-Gog z*A0zH9AnHA3_K&(K86$~G$`St0K`zo*|fF;Sg-|EP0@%BKu9cJ^^_aJI|>kjAW#uc zy`s^g(8g0mU&gwF-AlOs7J8SMiY6Y`n+X+ztX9%JKL3+NH zTLpIqR^|Jo?_L6pzu31#k%8^5zCsOA|5D&3fIqb1z4K_n;woClfsK!_)*8SW5jvhP z(9VO;CJ%sVXmO2{G-cu!2UM@*oT{f(t5ks6rFssa8lZ8e-WMpXhjEHU(FE*Cq+D_q z=G`mSc_W(XS8FcE)PP9(j3r+bf|tlELA?!56;@y7>W{byY2Vjs5s()Vk{8$y5tmLE z*5D{*gwFuyL1&vgxVVmv7?{(<4~R&l7K3wxM9q4|cKY4*YJ&j#4T~E!$_ZmoqtFr>Gr{e zssu5cq2k@dQ${Ee&x>0~Q&U7zx?k$vEOoywwQP}Ewi;22yLIvQ8de!ncDvgkBD(!b zAdk74?^N5|J6vghtz!^0xrl7;xEh>K)H>C6h<-rzs*zd(Yix(RUA+k{BYX&z;%cYF z?n2s_)Huo-4c+dJxI5}A3?=MUW3X-9jo$2GZ>|9CjodjddOj!%tB{*RpbTZGK>zyL zr0{GBZ+PUo08dKjUryAKI6cbQP7@9jA3=h&EJj@drPPLO*N8TsznC-_F z-g`$9GMa))7V{2d36qpDXwo(?{K|^n5Y+uMc-9#PB=OLu)@;cEF_Bln(ulI;mc~$)@VE8Gw2GH+Y)oBK51DZL z(80fePkh|8nP=+Fl%W%$I1iO@;941Z+hB5$^~kWb)3Y*ce_wAY`Slwr1GqLe4dh|! 
zMr5>USSfwCj4C`Pa_I3L|CWByVp&63NxWnUW!B zT|LIu!hwvML(x7Ag8q`oB~!Knr^zt<0~;7Y3JyaoFiW=zH)-M3_d+=X8gnHM7WE~q zT^&8aYPdeD;d2>h3Odx&K8aTiB|%)m>IMA6b!&Uoc1EzWQE^~=mST5j7EG4oP?MId zc7jgJTmTymqEu7sUX76qg&|g5-*)xA)m8N-)>lhX_p#z8BEd_Hqf%@o z%`(ln4g_8t4aDg=iU;+6#*Q+8jSgCqyOhhK@f1iJX(-7Py&dmHx@6yai40`B_2Kh6 z{jz8kW2_*>EVzAh8i|o+EyB5uu*@1CxxX-_0uaQMLL>sdwX47Ve;=uxXOJ)uB0;*9 zhdzU_pb5sHsp*3#sW&(PVl@G>mB)yB5etIV9uBSt%bUb|9ulS14bKvjlq9DRPV3Z zZWl>tSYFu9hrxwfstaU_ZJyn32*If5AVJn$7htI#5p%7AnCn|czoU9ZAJQl3MEhcp zG(0mj&1m|@WpBZ>po0SYGqgSR`VIjTW?kWq9yX@LZ4^HdQv*4@6%U}1WSC$=0t%0y zWg3(zj6^U@HCqnxHRHhYPTLqFI?Xr=8*Dt&XhlU{4gz0C$BzJ85O7Ju&Q5;fu0qqLNe&oN z@HvW5@N9KYy=+(`)D2Zy*rK`*7?32hIcMy?2X@~-yRTW2;oC{6?DWN&BwV-{zz*lL z$rJfYsOVJP+0JP43~XLTR;RiQvrVQ8wuxqJt8BJdHxzjHr<@iLRu}R(s58%T7l<4^&jR`_9pKvr$mVuKifSbsnWBOSZDCJ+~-F5~qF%anTlekO8VA@q7BW>n*fo}!2 zbTsBM%2`gLs({^VCt|e9Bg@+hm|=st?Ui{C0ehj)EZ;N}em}MXa>01$DZG*tlBlvNv%6Ts_)lSw=(x5KXVN^L};GxxRJ^h0oZ{qy01f68? zX$I0i2{>{38iJkdB$Ls;9acyi1+5b8H#gM9^2?RU!Pzw&Oop}czrA;eKS!+ z4BO#O3W3I&eZcDeF!Ku2LCH=1kBf!RoLc822zXrM#o&vf7sC@_cI0yGB}gR*-$i>3 zVJrT@z8t(37S2ov;&%HA_-RGfE+Tf;*DFcM;ZkYKNFc`38i7rOSQ{o!swX_|= zb?9hZxRJw>5wixG0YC!KJ&&^;;8>9pu3Da#<%rW~=uP8~P3Cr7ji2)|^vnoPL}m+O zU$5u7CzdXG&;`i7h^2y}NjEaE8 zjLwH$&LFHLk*h;Ws0&D7eL!z8u#NySEDgizqGLJ^S$a@PuvHiL7o@&unK%deO?2xA z=a1JySL~U9{x;gw33~^z;zZd!S7_VND>a0!ie_+5XLLZ^o7&-HiNSt6 zLWe6`{s8V_6>k_L!H1reH}p5DZU{Qw$t+JJK!qF-on{2>MOw+BI)R@CLcWjSVlN7& z`8r%K0l^FcJrY%htbref>^RJ(Vz`eW+-mnjUJSxoe(P}^M0|C)USJbY*JD`4Sv|o9 zaOd6tPPx5hJChP8b@@JkNjd3Z8vIT~1LA$Mtq1xU#J&?*bp|)p_4Ue{)z>}!_#$9E zjdYrnEY3m}GZil=G9o^Qg(%ctWCB4{N!&q|*BEd!dK^ewdvQZ=c|<87oPt95hsI?mKUv$qWz*|RuYZ2) zcb@;{+qZ2WdCNnufBBs|-n@6`>z{r7Hy_;hrl<99BRe+g|H4JAmJ9lajIoXRERJcM z(_0a-qMcp`6-FhPF6$y&%K?PN=Vv$_=W+WzJ_iE->7iwXOPg|;_^|#Hn?b>g_C(O& zL?G}LXW>z1lR+XQP4|R#e{B6&(3}j2L7^PE0{fD|0OD}GS_+a4vCP(?kFTO6Fc(m{ z+m*cLs=o$FB+d&|5Hq(2zdKDgWf?t#_bzgp^}hjoES;T0uW#G{k_qMiDtrHH41R@y z%%Z`5rL&9x(YHF!>%Z`5rL&9zed(h9{8?A#x$SyEwAi3!ek^Hlv3yc3v22C_4L` 
z!%39#Mg1F$gKakCoX}y0vU8b|7rr=!bKa@MIhe0cxoOFqiu+HnN^HE5qEcP{8&hJc zvyqMdRs9FQg2qeM(7Ln6@S5p&YKWx?Ky7w_ZJA|w+j@P_(n^p-VS7Xa(u!boA8Z$e^$%#_OKOYmPS zN7e-jn8JS>31tZjGkTFV>_n!3y%iQFKgPemkMv8<i6qE}^C@dTxH^MrHTO0I`BiaG`cKD5*glLjz6ljg$TraWvui?(?#38RNB#bzn z3h%2pXasj!0_AZ=JYO;gC3xx-o^x?#G7W(wJjBv&4W5HTush70gFanz`eAjAGoB8V z$k1b$VQ(bL?ol2NTnOc8}zQ(jGr>LzJ zKVNTaAPDXBA0d4kfJc=9?u3K*IX{npc#oXlj|;>_!wCoTYNHzF;ViXDZB|?8AYN^w zb9g$2r&D;fi_YMo!Gj}sx_?*qsC((iT|J=o(t$gjw$n|!I-nj_aK^3<)BQRfufyFr zoUYU1x;m*&O`gJ1c)LLd`WM-K^191hTyolInQLO1_)7E4!m<$8>v_kMcgxhqFLyJL zw;KEi+td-)iHw=#GegX0WNFc@cW5`q35cI)kY zJk8)NgGU)W#^7-VPcV1~gA{@mh`~-bH;gxsEoaO3?-tmBy9f3d5b{f0m#-pe0D`-J z@;~dJnDhTjzW=EQ(AFDZ4_BP*8V@%li+l>b-&n*z?C*<&AWvyS6pbfXe~`Q(sn`my z_B9a;Oc~cTpF%fDj}$HGrNl=0f|lT-u?`FIwk55U)<`QgwWP&cqy^Z$7WN^f>@}hy z*>Kk)hIeWpC1I;TQ@n;0pmgB1{+iERSaU6lLts_MrMXLUcT&>-d9H#4vIOj5Yd8Mw z#6P|94afUCNc~SAZ;p0LKQTrvV{()6UXi|iyk&$NV<@A~^f-=t^D)0g`7P-iS5-gsv%31TmOsTropN~==R3r2&rD9Z@1_v z*&7~6u7#WU7UN0Q&*0x%^jPwRTJo83{gL&pU0I)5(?7C4$=6z+8D|`^Z;msTG?`%$ zhigJSu=^QxQNk%bbdyPli?T{);SXD9;eN~f)0+`{>j?;90&tj6WSAJT5SJxG3QCQo zEZuy7=^v$K>{0+AMHqh2NRTx*2fPJD$P%oB6o8fXX{qPA$)xys2#V@?%#_sgNXhNB zvb`n_&X55`n$}fmh7har zTAe->fGbeoHV(lGC=j_cSHkl9ckEM4`ZR29#Xr5BeF-wg|KrXlQ6w0vf*V+A2B6Tv zp+;d(!VpX2BmZRSVN%Qz;I_m$2Kmmw%F%%@a>;lnmK3Ef{t(5X5yZJAYE9aZIsqdGQHF?w5^T8v zkf0~nO=*5(xL{_4j@r^uhT=*X=7~YmX~_J6`emnro1r_!x*Hqnn!u@5@WlY7ci_Qv zli04=e`vVjzg^l5YiF|=NJ}=myRw_z_2lel&iO|a=WDxpc8e#u^j(Ns{(cZ0#!cf7 z&rO}syYayFEsBvERtEfWk)nY|F5wBl=3;BVAgfl~e8C?PQ-D2#D+=0{Dw_adDfkk# zq+t`cWI?M0ooPd=2EGEvUnBHG17m``WO`-h{KU0wHjgwegVYnXw0z#VmG1QhGbxZh4QqE8gQQ zAlIhk4{emZG=zhC8mN?XEKb)Oj$fcpgK^XXhj3hogSiC(08Sj|;`XEsPktw&Bx1LT zfKx5H+fRcsfEz8b8pZ9(E!pY7-)YXEvJjU{KSvw~^2TId>|6r*EbGO`aZ;(h5i=a@dFzAAqM*y z$UbarMz7n9a6Nnf%?5c0LoCPs52AoJ$cN|Vy8jLyxZZvK4I1iJ(u(j6s}S83?RNd0 zz(M~VR%_ckA+K(cPspP7e5GfaHJV*OVPdn^m2a^t8m%z9qLE%&vhwXdq{jNM^KtF# z|5|tjsRdCkzL`-;>#Z21YqsVFFqT;DGa#E~D~Fwsjq*PZ_mDMg1JH0I-Fm=3(LUfu 
z8VwdmeT#v+$$*zmt;l)PL2G01OerSaios$!4wSxwJwC+X2!j>fei(7P*=>%a>GrU_ z-O`Nz9;5I*&4<4CM_=dPUp%zD6DvMTJXQZAE^km7-vTf;OV2+FGJqbTU6d_P2j}6= zwAL{lnu)+?LR{YoE&zm;yu+6@kqTd|fww0Z`mm(>w4X{&zBN+C=et$sd=Irbwa$L9xbwZL zdwvysM--q#pcsSM?r+4cu0Ok7rc{($FTt$J?j^sDeK2NO z>_I!%kqm+!0By!BU_Nln<(oK65~{8BYl1I~22!fWVPs5u9xzC%w~^X!7kfItz8+ia zH)UWvHq^p63i84F8;o$j-!sfMPAvA&&SSa{<2W?GQQEk^2K^aYI)tdL#o%b8Z$Tm*215$8GCUp>gTF%jOYAqwcBuzW;@5Ubi8(3tp}dM zf^etPEDiGpXRw24+u+MM-c#%00U(x98oE^~Q$u5h{MOW1@+_!MUM}rA0m2<#n&j(5 zJfQ~<8OiPAC(eu{aRVpSIoQ)@bFk&1M;}Lw{d3bWwn^g4SGoXRi3UD&aV*{pop?Nn zzti&JJ7=#vFd|6>i}?0xq}<60wwHF^JCcNs3>FaLYeZBd$tNi{gI^ao@IY8_o|b|P zdH62U*FqkWvly{Qg+dharMmH02}gwT3g`Rr86zbv=V>CGy2rtw>b9Lp^BN({ag#VJ z0Jjb9MQDVR@H&;G=aN06yQ_(MF~`b}Ci8(|xWe5%O9n>%zWtDI7k%v^9B2AtQcRYZ z!;YJld{$y$5{=KU@US_!qv#Ai6sqz?@qLKrIK35u$;8;1LMxxyKZ|NsIvKz`B*no> zArCibNt{=@YoZ!=a4VG!D*Q|I$&IsKMKcCmCZ2uHSD@OLvMD%w zyp+B;WzwQ)sW^ugj&m`zDY)WQ(7R_S#lXQ3*oA3!*e>E@p>RtFJvW%myzuy*T{tJc zp}ipbMr$2sMz}uDnx61JI%(`KbM1% z*h>oLc{Cxg3P&$&x_vRi0$6$lw^`e8G=sKb@&nsKYo)$mB1k_UNeC2rt?f9bf%n0k z&?@!^Bi1%+pS{`I7K~e?QTireiQDX<;AVW;kcT)D)@C@5NmzUFrVqp-Lj3E81(coy z<#auI4&El{`2i1&zXWPshWFGRY;Am0K)1sl#ZMS`wPDVcFn4|zLx-4dM?KC ze3>@BJnsQ3QJ(66^(QE)PI#mEtiq8_-d_#EinI%N-C$&U03UmeZKn|7tHw1q4)$mQ zMrp7F$DWV{ohxR<4j$s4jxwAs73l>RHXIlg92|FHM(?Cz7Xd-erSnQpW0X=+-&3XR zdbq)^l(SPuWWPUmdGtVakYD|kV#HAlq)zag1WN$;Xxv$i`-iJCaNxEP18clq1xNQx zc=m~haW;rsnV;qbZ!3GAdP(}_!BdO>8rSO}62gKGD~l!uJlDt04$9~7xmGiJc5&~akv`6$y&Hn#j?^DaE-vBM3kn* zFq--xkZA{&8Qqq4(6H8+-TJA2M#W40SWX@NsCOU$r>rIO4Mctd zAE4}1J(4a~PX_`IDQU#pH1S6+IB`X;znaHsZn(Vw>Ywkbbxrrs80&HXuG;YI_Aah+ z`w;8P_`v`Aq~!t)GP=>zE{s4A>!tBltsAZHx(dAy9J}_Y!9i{U^Slmm>lat64QA|6 z(hx}>BkMCQRP{6)B@Jq=b(?e~<{Bx7O<&yp={0B>>VzrIkU8PKxE4ffg4PD@2O4;W z7z_!FnboGNHnmHBYc@A$lHWSvI5pmFk$16`^@1R@;Sk%*RzzltPVTmHo{c*^+0eI- zSL0^Ujb@z-)t;%jO4@(sVTKz-mRQFD?yVNW(dSUv@`n|Ah0I=Cc6F!+-?hkPXzL(3bJ) z>MhBK0Z)@06OrPT;L-$<$6-9x7HOzGdrMOPBAR#WIb1LOe7X;60*k#>flr3~d4$9f zK6ye9Vid=<^!`>e>9x)z9XP&0g)~U1pF(c0jLu(B879a7oXxGO`}5Z7s&+hnM8BU? 
zdq>L%I5_H#PmfIH=={)p#m1-5_s0LJqoFTx3B2J*e7m)2x31?62O<39j6t&^%l=_C zl9a76vfe+z%hN?(#L)0!keKQ=TO?gaYWHCpnqK6aCjRhDDtJpyCP7`rPaVU$S9vkG zWmjnc2>LFig8@+iP~1NW!Ymc_F}$%!pTczL4NI;lO~?dd8{f7zI)ylE6eW|O=wp1O zo*~uQ+$uNN1a`OY)z>(A=P)n~j3cx`t-cYnem+Xsi;J3OO-mf3;I-!K5!8K|I znY-dWVsO?L!jQy55E_sA+o%Y6P~LnL>P&I{{`3byvq)MT5$Yhx% z{qbW_7h_otXq#?`zUV3*em_8gn6YStD*i%{A|`xc41_gCBM7?1nqF07*e7o2nJWP` zK_|S8tgdyET7h_H2((rI;u`uFffLv8G=xYPg+Y!dMVjnaEn35|D_rpea(-B}gk3&Q zEc|+$yApFBm^9MYA3U^7o>!~_#b#;5Uj8;CULqtCeH=a-plCO{{6oaU1=J?4{MT>~ z78T#kfpumJCo47xqO28W6?W9l`Pp0!bh;SZkjE8uj^T=r^6_4jC+>3H}`JK$%lBDXaoGzTzZ<-9{<+ zD3d>rAk{&Z7iM)2cc&s*oco16wVbDIr0~xe2>E>nJKhVKOJUB~=!sX_?{Q=A zz^eex<(o}C2$PAO){tydvW@rS-2FBo#k*}f4A(!6re<3=XcXBSipF>2!I0dPWs~IA zZq0JQYf-DNcj5{H9a{;XSeXzf+qAI8hNb5|`9jHp4SsDqtzbS0q8&e+)f2)j1Xnz8 zE(Hxe2wmr4=;90KA4Xl+RH!Sc--R+`Sej5n^Jt?iA}9uh1>z#g_h1mr=_M#@$$uKZ z9)HKX-(>I*M_Gg#qkJT{BWI0p=tk}*5CM-~xr#Y~B*}Z2M^107U~{@*s11khFm8ZC z(VdE8$b4^!=(d!1zJbQQd5sMJ9`1oLPe0oE$iLX)-GcO@tdyehvh}BmRRQ12_zL0)PdQL}(DX zweg{oNTUZ3y|D)}K^$TTjmqqVxTu8IN~VGe>r;U)LURg3SoU~P*1ha6m$HrnG}_pI?Yl7{S!U-{yH`PhiQiYit# zN#BQN-rX{we1^!6YZ8|cxg5c?oMS0jJ_g4z7`DJVdQ{+|!sXxcV(b6GWdGAdtzuCT zJ-~sGU@h*B9ARb}E=vipF4)KWw=nQ4#l~qi0UwOl?`1?psyORASu!9sCvV=|?z%M-hx4Ibufg z7-LumdNsv+=|u2|T#t-=%!{Ves+{Hbw+VFN;RMe7^k1@3zs%qegIC#F+0j#c{2c}~ z(uR0#itLg3tnk~J>Jr}_=N)a)G=2f|g{lsad)mbbK}U1dc>Ts#SJUwX6 zB=Tb)Q)~uTFg}Gkdk!w_m`uJ6fz>=m!MEl%@}fAoC~KRdFoROUa+3O=*Y3-hxH_T_c^8! zGl}adMEdN})A)498FPenhWV4su%CBZ5O~qY4xfJb zQuNS`j(n3N~$WtzX!lsMhiPa}?oX7XuT{XA2Y89c$3 z_Z~FM<`BRp_(mL)tnl3n3_il(=NMGj(=YMvs|>!$;LjL*o56P({4IljWbgw9|D8cE zv#nw96+Tj5EPj;5HLmeVd5Ccb8Kf9oV(=aYWdoP7Ls-u)H}{VMO? 
z&4A}W#GGAhtPSBpBUMe~N3j|dyEriz6LTR$(H!L4EexpU7KyD0+=LPrY%jP;K$%`d zl2qc@39bixJoEU;(}E)eyn3tn%{Xim82RxNnUr!~Wcu(pQl4o6jRb1Ox+%8LxW=SG z2(S)%5DJmSxYa*=yt4J5h_4%I?cR_uArM;+;3)GT9AHv>$d8?LTghNN5)a4lue&qe z-Q7bWb~osnfndBV671{m8|dqbh5LKrJxCRaM-uoKOT^*{pl4u^_3>~#fsdqYjwfP0 z@lHGqb;slTV?*&scLeG8#G) literal 0 HcmV?d00001 diff --git a/utils/__pycache__/torch_utils.cpython-38.pyc b/utils/yolov5/__pycache__/torch_utils.cpython-38.pyc similarity index 70% rename from utils/__pycache__/torch_utils.cpython-38.pyc rename to utils/yolov5/__pycache__/torch_utils.cpython-38.pyc index 78cc54346dc0fd963a56f7ff8f869505f09ef805..caeec97d5a66ae18a50f78c6689913f8aab5f821 100644 GIT binary patch delta 1266 zcmYjRU1(cn81_9me@S}QuI<_;Yu0tOSx(z{YP+PVHnkc3bJ$e7Rx}llA?J|9B}@EH z>^7z-7H>q5?t>vzn441^xC=c9&Wm~B#ilS3FGPdhxC;?P7i z=j7ZboZSR%P16Do;qSMuQsMP2?JJ*Rsax0k2K-jwys!zme!I^WKHP5w@7f3M+AWsi z2H<^={sPSA7X;IleG*I^F-oh5V@|6xpBT?&QCZTA=g8kN%D4Zj2p+F?V|`3bHq2 z%)K@~4XFug^D)g6zzc5Jrd2L9Sc=pkD)N6fUCoJwWAz4$k#B;h6A?)A^~4&?@Xr&M zk(NnB*515>?XCH8`HCMPZNEH02l4fo8<*GbcGF)zC@5k zXey>@)e5G`yi;2z-6Zgiu6r5j8C;Hvg;WrwfU35zW&X!>YWWUI^X#7oI#3&&$$NTV zBWK-IsRsdP7$QDQT+Fg@G7cfE>KXPD8Nka=e1acc+f4ssO3^-UvwUUewy8Dq6VN{s w^FHh6ku~L_-LQ{uuojYc5w!AGX5Q?(O;n?z-bv}i#t3EzGCV!m%SSTKe`eVknE(I) delta 1280 zcmYjRO>Yxt5bmtk>-85-lYo<$5JDjFIv?voY-~y#qofTG5okWzG^$uRyNhjMW9!`w zX+)(~$)Qr!3w=x%`)KBw znP+C+eb?7E;FArAE<~fD7w|XMI;F2)jehF`BXITCo<7+KZV4N()NA&b!j1h#=&66; zsUJ2XP6#%Ee=cbLzX25-$>)dhGis$#H`Q{pWZ9*fI--~KimBQSrdQO;%CfprvstOO zW>*wvclOo@vC0e-_j1d?;#e~+BaKp%vi9|Cs(B78gE?tBoO z79iq02wz9Wv7)d0Aja>tSQ4$mf_&n|->^E``p^$CXFL9fjNaY-UqNex;xz0{VDFF} zbJh<$6fx_Sf z@I@GPHV^lMD`DQu7eLXxr3`s&2Dm#HL8_KWRmFMg3yaW+{s0`LRql z<4P7$0WI7RTko}Zm2;wb+zSCwBuQ}MF_ zp2+W$rinkxZxuS)Cegftk0s(TvJjHkBzEg|-p_Xm3EBhCPr$hAGv971z($d}DX=8} z{^j$hyxqK2lBDyJI$Am-|k&NUkUDHc|~_9 zR-i1eQ47zE^>=wYdBW=$Uh>r~?o1H_*rcn!e~XqwT`HH~?vcAofUI=c8Qnjhg~$*3WN=nu(qgw6853c0gS zkb0FpdQO}=;7r~X`wnf^K}wwn*nWuYVY0;;mLVsLFt40oIynNrF#Z|HJX-86M!nB} zq-J)|G$fh; diff --git 
a/utils/dataloaders.py b/utils/yolov5/dataloaders.py similarity index 99% rename from utils/dataloaders.py rename to utils/yolov5/dataloaders.py index 76a8f40..b30c0f1 100644 --- a/utils/dataloaders.py +++ b/utils/yolov5/dataloaders.py @@ -30,9 +30,9 @@ # from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, # cutout, letterbox, mixup, random_perspective) -from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, +from .general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import torch_distributed_zero_first +from .torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' diff --git a/utils/general.py b/utils/yolov5/general.py similarity index 99% rename from utils/general.py rename to utils/yolov5/general.py index 9b10734..990ec5c 100644 --- a/utils/general.py +++ b/utils/yolov5/general.py @@ -34,7 +34,7 @@ import torchvision import yaml -from utils import TryExcept, emojis +from . 
import TryExcept, emojis # from utils.downloads import gsutil_getsize # from utils.metrics import box_iou, fitness diff --git a/utils/torch_utils.py b/utils/yolov5/torch_utils.py similarity index 99% rename from utils/torch_utils.py rename to utils/yolov5/torch_utils.py index 9f257d0..6338fab 100644 --- a/utils/torch_utils.py +++ b/utils/yolov5/torch_utils.py @@ -19,7 +19,7 @@ import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP -from utils.general import LOGGER, check_version, colorstr, file_date, git_describe +from .general import LOGGER, check_version, colorstr, file_date, git_describe LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1))