From 7a36505023f70fae6cd6c32eac4cb53b0787881c Mon Sep 17 00:00:00 2001
From: cxnt
Date: Fri, 29 Nov 2024 19:21:07 +0400
Subject: [PATCH] update launch configuration and enhance RT-DETRv2 model integration

---
 .vscode/launch.json                       | 34 +++++++-
 supervisely_integration/serve/rtdetrv2.py | 99 +++++++++++++++++------
 2 files changed, 106 insertions(+), 27 deletions(-)

diff --git a/.vscode/launch.json b/.vscode/launch.json
index f72573cd7..b8666b942 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -15,7 +15,7 @@
                 "--ws",
                 "websockets",
                 "--app-dir",
-                "supervisely_integration/train",
+                "supervisely_integration/train"
             ],
             "justMyCode": false,
             "env": {
@@ -23,11 +23,13 @@
                 "LOG_LEVEL": "DEBUG",
                 "DEBUG_APP_DIR": "app_data",
                 // "APP_NAME": "Train RT-DETR",
-                "PROJECT_ID": "41442"
+                "TEAM_ID": "8",
+                "WORKSPACE_ID": "349",
+                "PROJECT_ID": "42201"
             }
         },
         {
-            "name": "Uvicorn Serve",
+            "name": "Uvicorn Serve RT-DETR v1",
             "type": "debugpy",
             "request": "launch",
             "module": "uvicorn",
@@ -50,6 +52,30 @@
                 "DEBUG_WITH_SLY_NET": "1"
             }
         },
+        {
+            "name": "Uvicorn Serve RT-DETR v2",
+            "type": "debugpy",
+            "request": "launch",
+            "module": "uvicorn",
+            "args": [
+                "rtdetrv2:model.app",
+                "--host",
+                "0.0.0.0",
+                "--port",
+                "8000",
+                "--ws",
+                "websockets",
+                "--app-dir",
+                "supervisely_integration/serve"
+            ],
+            "justMyCode": false,
+            "env": {
+                "PYTHONPATH": "${workspaceFolder}:${PYTHONPATH}",
+                "LOG_LEVEL": "DEBUG",
+                "DEBUG_APP_DIR": "app_data",
+                "DEBUG_WITH_SLY_NET": "1"
+            }
+        },
         {
             "name": "Python: Current File",
             "type": "debugpy",
@@ -63,4 +89,4 @@
             }
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/supervisely_integration/serve/rtdetrv2.py b/supervisely_integration/serve/rtdetrv2.py
index 48d81958d..8c4e6e74c 100644
--- a/supervisely_integration/serve/rtdetrv2.py
+++ b/supervisely_integration/serve/rtdetrv2.py
@@ -1,14 +1,34 @@
+import os
+from pathlib import Path
 from typing import List
-import torch
-from PIL import Image
+
 import numpy as np
+import torch
+import torchvision.transforms as T
 import yaml
+from dotenv import load_dotenv
+from PIL import Image
+
+import supervisely as sly
 from rtdetrv2_pytorch.src.core import YAMLConfig
-import torchvision.transforms as T
 
+if sly.is_development():
+    load_dotenv("local.env")
+    load_dotenv(os.path.expanduser("~/supervisely.env"))
+
+root_dir = Path(__file__).parent.parent
+app_options_path = os.path.join(root_dir, "serve", "app_options.yaml")
+models_path = os.path.join(root_dir, "serve", "models_v2.json")
+
+
+class RTDETRv2(sly.nn.inference.ObjectDetection):
+    FRAMEWORK_NAME = "RT-DETRv2"
+    MODELS = models_path
+    APP_OPTIONS = app_options_path
 
-class RTDETRv2:
-    def load_model(self, model_files: dict, device: str):
+    def load_model(
+        self, model_source: str, model_files: dict, model_info: dict, device: str, runtime: str
+    ):
         config_path = model_files["config"]
         checkpoint_path = model_files["checkpoint"]
 
@@ -19,18 +39,20 @@ def load_model(self, model_files: dict, device: str):
         config.pop("__include__")
         with open(config_path, "w") as f:
             yaml.dump(config, f)
-        
+
         cfg = YAMLConfig(config_path, resume=checkpoint_path)
-        checkpoint = torch.load(checkpoint_path, map_location='cpu')
-        state = checkpoint['ema']['module'] if 'ema' in checkpoint else checkpoint['model']
+        checkpoint = torch.load(checkpoint_path, map_location="cpu")
+        state = checkpoint["ema"]["module"] if "ema" in checkpoint else checkpoint["model"]
         model = cfg.model
         model.load_state_dict(state)
         model.deploy().to(device)
         cfg.postprocessor.deploy().to(device)
-        self.transforms = T.Compose([
-            T.Resize((640, 640)),
-            T.ToTensor(),
-        ])
+        self.transforms = T.Compose(
+            [
+                T.Resize((640, 640)),
+                T.ToTensor(),
+            ]
+        )
         self.cfg = cfg
         self.model = model
         self.postprocessor = cfg.postprocessor
@@ -46,15 +68,46 @@ def predict_batch(self, images: List[np.ndarray]):
         return labels, boxes, scores
 
 
-rtdetrv2 = RTDETRv2()
+source_path = __file__
+settings_path = os.path.join(os.path.dirname(source_path), "inference_settings.yaml")
+model = RTDETRv2(
+    use_gui=True,
+    custom_inference_settings=settings_path,
+    use_serving_gui_template=True,
+)
+model.serve()
 # Load model
-model_files = {
-    "config": "app_data/work_dir/model/model_config.yml",
-    "checkpoint": "app_data/work_dir/model/checkpoint0005.pth",
-}
-device = "cuda" if torch.cuda.is_available() else "cpu"
-rtdetrv2.load_model(model_files, device)
-# Predict
-images = [np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) for _ in range(2)]
-labels, boxes, scores = rtdetrv2.predict_batch(images)
-print(labels, boxes, scores)
+
+# local
+# model_files = {
+#     "config": "app_data/work_dir/model/model_config.yml",
+#     "checkpoint": "app_data/work_dir/model/checkpoint0005.pth",
+# }
+# # remote
+# model_info = {
+#     "Model": "RT-DETRv2-S",
+#     "dataset": "COCO",
+#     "AP_val": 48.1,
+#     "Params(M)": 20,
+#     "FPS(T4)": 217,
+#     "meta": {
+#         "task_type": "object detection",
+#         "model_name": "RT-DETRv2-S",
+#         "model_files": {
+#             "checkpoint": "https://github.com/lyuwenyu/storage/releases/download/v0.2/rtdetrv2_r18vd_120e_coco_rerun_48.1.pth",
+#             "config": "rtdetrv2_r18vd_120e_coco.yml",
+#         },
+#     },
+# }
+# model_files = {
+#     "checkpoint": "https://github.com/lyuwenyu/storage/releases/download/v0.2/rtdetrv2_r18vd_120e_coco_rerun_48.1.pth",
+#     "config": "rtdetrv2_r18vd_120e_coco.yml",
+# }
+# runtime = "PyTorch"
+
+# device = "cuda" if torch.cuda.is_available() else "cpu"
+# # rtdetrv2.load_model(model_files, model_info, device, runtime)
+# # Predict
+# images = [np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) for _ in range(2)]
+# labels, boxes, scores = rtdetrv2.predict_batch(images)
+# print(labels, boxes, scores)