forked from Github/frigate
AMD GPU support with the rocm detector and YOLOv8 pretrained model download (#9762)
* ROCm AMD/GPU based build and detector, WIP
* detectors/rocm: separate yolov8 postprocessing into its own function; fix box scaling; use cv2.dnn.blobFromImage for preprocessing; assert on required model parameters
* AMD/ROCm: add a couple more ultralytics models; comments
* docker/rocm: make imported model files readable by all
* docker/rocm: readme about running on AMD GPUs
* docker/rocm: updated README
* docker/rocm: updated README
* docker/rocm: updated README
* detectors/rocm: separated preprocessing functions into yolo_utils.py
* detector/plugins: added onnx cpu plugin
* docker/rocm: updated container with limited label sets
* example detectors view
* docker/rocm: updated README.md
* docker/rocm: update README.md
* docker/rocm: do not set HSA_OVERRIDE_GFX_VERSION at all for the general version as the empty value broke rocm
* detectors: simplified/optimized yolov8_postprocess
* detector/yolo_utils: indentation, remove unused variable
* detectors/rocm: default option to conserve cpu usage at the expense of latency
* detectors/yolo_utils: use nms to prefilter overlapping boxes if too many detected
* detectors/edgetpu_tfl: add support for yolov8
* util/download_models: script to download yolov8 model files
* docker/main: add download-models overlay into s6 startup
* detectors/rocm: assume models are in /config/model_cache/yolov8/
* docker/rocm: compile onnx files into mxr files at startup
* switch model download into bash script
* detectors/rocm: automatically override HSA_OVERRIDE_GFX_VERSION for a couple of known chipsets
* docs: rocm detector first notes
* typos
* describe builds (harakas temporary)
* docker/rocm: also build a version for gfx1100
* docker/rocm: use cp instead of tar
* docker.rocm: remove README as it is now in detector config
* frigate/detectors: renamed yolov8_preprocess -> preprocess, pass input tensor element type
* docker/main: use newer openvino (2023.3.0)
* detectors: implement class aggregation
* update yolov8 model
* add openvino/yolov8 support for label aggregation
* docker: remove pointless s6/timeout-up files
* Revert "detectors: implement class aggregation". This reverts commit dcfe6bbf6f.
* detectors/openvino: remove class aggregation
* detectors: increase yolov8 postprocessing score threshold to 0.5
* docker/rocm: separate rocm distributed files into their own build stage
* Update object_detectors.md
* updated CODEOWNERS file for rocm
* updated build names for documentation
* Revert "docker/main: use newer openvino (2023.3.0)". This reverts commit dee95de908.
* reverted openvino detector
* reverted edgetpu detector
* scratched rocm docs from any mention of edgetpu or openvino
* Update docs/docs/configuration/object_detectors.md
  Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
* renamed frigate.detectors.yolo_utils.py -> frigate.detectors.util.py
* clarified rocm example performance
* Improved wording and clarified text
* Mentioned rocm detector for AMD GPUs
* applied ruff formatting
* applied ruff suggested fixes
* docker/rocm: fix missing argument resulting in larger docker image sizes
* docs/configuration/object_detectors: fix links to yolov8 release files

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
65  frigate/detectors/plugins/onnx.py  Normal file
@@ -0,0 +1,65 @@
import glob
import logging

import numpy as np
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess

logger = logging.getLogger(__name__)

DETECTOR_KEY = "onnx"


class ONNXDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class ONNXDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: ONNXDetectorConfig):
        try:
            import onnxruntime

            logger.info("ONNX: loaded onnxruntime module")
        except ModuleNotFoundError:
            logger.error(
                "ONNX: module loading failed, need 'pip install onnxruntime'?"
            )
            raise

        assert (
            detector_config.model.model_type == "yolov8"
        ), "ONNX: detector_config.model.model_type: only yolov8 supported"
        assert (
            detector_config.model.input_tensor == "nhwc"
        ), "ONNX: detector_config.model.input_tensor: only nhwc supported"
        if detector_config.model.input_pixel_format != "rgb":
            logger.warning(
                f"ONNX: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
            )

        assert detector_config.model.path is not None, (
            "ONNX: No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
            + ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
            + " and "
            + ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
        )

        path = detector_config.model.path
        logger.info(f"ONNX: loading {detector_config.model.path}")
        self.model = onnxruntime.InferenceSession(path)
        logger.info(f"ONNX: {path} loaded")

    def detect_raw(self, tensor_input):
        model_input_name = self.model.get_inputs()[0].name
        model_input_shape = self.model.get_inputs()[0].shape

        tensor_input = preprocess(tensor_input, model_input_shape, np.float32)

        tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]

        return yolov8_postprocess(model_input_shape, tensor_output)
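For reference, a minimal standalone sketch (not part of the diff) of how this plugin drives onnxruntime end to end, using the preprocess/postprocess helpers added below; the 320x320 model filename is hypothetical and the frame is just a blank placeholder:

import numpy as np
import onnxruntime

from frigate.detectors.util import preprocess, yolov8_postprocess

# hypothetical path; the commit expects exported YOLOv8 ONNX files under /config/model_cache/yolov8/
session = onnxruntime.InferenceSession("/config/model_cache/yolov8/yolov8n_320x320.onnx")
model_input = session.get_inputs()[0]

# Frigate hands the detector an NHWC uint8 frame already resized to the model size
frame = np.zeros((1, 320, 320, 3), dtype=np.uint8)

blob = preprocess(frame, model_input.shape, np.float32)
raw_output = session.run(None, {model_input.name: blob})[0]

# 20 rows of [class_id, confidence, ymin, xmin, ymax, xmax] in relative coordinates
detections = yolov8_postprocess(model_input.shape, raw_output)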
143  frigate/detectors/plugins/rocm.py  Normal file
@@ -0,0 +1,143 @@
import ctypes
import glob
import logging
import os
import subprocess
import sys

import numpy as np
from pydantic import Field
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess

logger = logging.getLogger(__name__)

DETECTOR_KEY = "rocm"


def detect_gfx_version():
    return subprocess.getoutput(
        "unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx | head -1 | awk '{print $2}'"
    )


def auto_override_gfx_version():
    # If the environment variable is already in place, do not override it
    gfx_version = detect_gfx_version()
    old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION")
    if old_override not in (None, ""):
        logger.warning(
            f"AMD/ROCm: detected {gfx_version} but HSA_OVERRIDE_GFX_VERSION already present ({old_override}), not overriding!"
        )
        return old_override
    mapping = {
        "gfx90c": "9.0.0",
        "gfx1031": "10.3.0",
        "gfx1103": "11.0.0",
    }
    override = mapping.get(gfx_version)
    if override is not None:
        logger.warning(
            f"AMD/ROCm: detected {gfx_version}, overriding HSA_OVERRIDE_GFX_VERSION={override}"
        )
        os.putenv("HSA_OVERRIDE_GFX_VERSION", override)
        return override
    return ""


class ROCmDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]
    conserve_cpu: bool = Field(
        default=True,
        title="Conserve CPU at the expense of latency (and reduced max throughput)",
    )
    auto_override_gfx: bool = Field(
        default=True, title="Automatically detect and override gfx version"
    )


class ROCmDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: ROCmDetectorConfig):
        if detector_config.auto_override_gfx:
            auto_override_gfx_version()

        try:
            sys.path.append("/opt/rocm/lib")
            import migraphx

            logger.info("AMD/ROCm: loaded migraphx module")
        except ModuleNotFoundError:
            logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
            raise

        if detector_config.conserve_cpu:
            logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
            # 4 == hipDeviceScheduleBlockingSync
            ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
        assert (
            detector_config.model.model_type == "yolov8"
        ), "AMD/ROCm: detector_config.model.model_type: only yolov8 supported"
        assert (
            detector_config.model.input_tensor == "nhwc"
        ), "AMD/ROCm: detector_config.model.input_tensor: only nhwc supported"
        if detector_config.model.input_pixel_format != "rgb":
            logger.warning(
                f"AMD/ROCm: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
            )

        assert detector_config.model.path is not None, (
            "No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
            + ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
            + " and "
            + ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
        )

        path = detector_config.model.path
        mxr_path = os.path.splitext(path)[0] + ".mxr"
        if path.endswith(".mxr") or os.path.exists(mxr_path):
            # reuse an already compiled MIGraphX model if one is cached
            logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
            self.model = migraphx.load(mxr_path)
        else:
            logger.info(f"AMD/ROCm: loading model from {path}")
            if path.endswith(".onnx"):
                self.model = migraphx.parse_onnx(path)
            elif (
                path.endswith(".tf")
                or path.endswith(".tf2")
                or path.endswith(".tflite")
            ):
                # untested
                self.model = migraphx.parse_tf(path)
            else:
                raise Exception(f"AMD/ROCm: unknown model format {path}")
            logger.info("AMD/ROCm: compiling the model")
            self.model.compile(
                migraphx.get_target("gpu"), offload_copy=True, fast_math=True
            )
            logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
            os.makedirs("/config/model_cache/rocm", exist_ok=True)
            migraphx.save(self.model, mxr_path)
        logger.info("AMD/ROCm: model loaded")

    def detect_raw(self, tensor_input):
        model_input_name = self.model.get_parameter_names()[0]
        model_input_shape = tuple(
            self.model.get_parameter_shapes()[model_input_name].lens()
        )
        tensor_input = preprocess(tensor_input, model_input_shape, np.float32)

        detector_result = self.model.run({model_input_name: tensor_input})[0]

        # wrap the migraphx result buffer as a numpy array without copying
        addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
        tensor_output = np.ctypeslib.as_array(
            addr, shape=detector_result.get_shape().lens()
        )

        return yolov8_postprocess(model_input_shape, tensor_output)
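The commit also pre-compiles ONNX files into .mxr files at container startup; a rough sketch of doing that step by hand with the same MIGraphX calls the detector uses looks like the following (the model filename is hypothetical and the paths assume the ROCm container layout above):

import os
import sys

sys.path.append("/opt/rocm/lib")
import migraphx

# hypothetical ONNX export downloaded into the model cache
onnx_path = "/config/model_cache/yolov8/yolov8n_320x320.onnx"
mxr_path = os.path.splitext(onnx_path)[0] + ".mxr"

# parse, compile for the GPU target, and cache the compiled program
program = migraphx.parse_onnx(onnx_path)
program.compile(migraphx.get_target("gpu"), offload_copy=True, fast_math=True)
migraphx.save(program, mxr_path)
print(f"saved compiled model to {mxr_path}")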
83  frigate/detectors/util.py  Normal file
@@ -0,0 +1,83 @@
import logging

import cv2
import numpy as np

logger = logging.getLogger(__name__)


def preprocess(tensor_input, model_input_shape, model_input_element_type):
    model_input_shape = tuple(model_input_shape)
    assert tensor_input.dtype == np.uint8, f"tensor_input.dtype: {tensor_input.dtype}"
    if len(tensor_input.shape) == 3:
        tensor_input = tensor_input[np.newaxis, :]
    if model_input_element_type == np.uint8:
        # nothing to do for uint8 model input
        assert (
            model_input_shape == tensor_input.shape
        ), f"model_input_shape: {model_input_shape}, tensor_input.shape: {tensor_input.shape}"
        return tensor_input
    assert (
        model_input_element_type == np.float32
    ), f"model_input_element_type: {model_input_element_type}"
    # tensor_input must be nhwc
    assert tensor_input.shape[3] == 3, f"tensor_input.shape: {tensor_input.shape}"
    if tensor_input.shape[1:3] != model_input_shape[2:4]:
        logger.warning(
            f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
        )
    # cv2.dnn.blobFromImage is faster than doing the scaling and transposing in numpy
    return cv2.dnn.blobFromImage(
        tensor_input[0],
        1.0 / 255,
        (model_input_shape[3], model_input_shape[2]),
        None,
        swapRB=False,
    )


def yolov8_postprocess(
    model_input_shape,
    tensor_output,
    box_count=20,
    score_threshold=0.5,
    nms_threshold=0.5,
):
    model_box_count = tensor_output.shape[2]
    probs = tensor_output[0, 4:, :]
    all_ids = np.argmax(probs, axis=0)
    all_confidences = probs.T[np.arange(model_box_count), all_ids]
    all_boxes = tensor_output[0, 0:4, :].T
    mask = all_confidences > score_threshold
    class_ids = all_ids[mask]
    confidences = all_confidences[mask]
    cx, cy, w, h = all_boxes[mask].T

    # the model input may be NHWC (channels last) or NCHW; pick the spatial dims accordingly
    if model_input_shape[3] == 3:
        scale_y, scale_x = 1 / model_input_shape[1], 1 / model_input_shape[2]
    else:
        scale_y, scale_x = 1 / model_input_shape[2], 1 / model_input_shape[3]
    detections = np.stack(
        (
            class_ids,
            confidences,
            scale_y * (cy - h / 2),
            scale_x * (cx - w / 2),
            scale_y * (cy + h / 2),
            scale_x * (cx + w / 2),
        ),
        axis=1,
    )
    if detections.shape[0] > box_count:
        # if too many detections, do nms filtering to suppress overlapping boxes
        boxes = np.stack((cx - w / 2, cy - h / 2, w, h), axis=1)
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
        detections = detections[indexes]
        # if still too many, trim the rest by confidence
        if detections.shape[0] > box_count:
            detections = detections[
                np.argpartition(detections[:, 1], -box_count)[-box_count:]
            ]
    # pad (or trim) to exactly box_count rows of [class_id, confidence, ymin, xmin, ymax, xmax];
    # resize() needs an array that owns its data, hence the copy
    detections = detections.copy()
    detections.resize((box_count, 6))
    return detections
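To make the expected layout concrete, here is a small synthetic check (not part of the diff): the raw YOLOv8 detection output is shaped (1, 4 + num_classes, num_boxes), with rows 0-3 holding cx, cy, w, h in input pixels and the remaining rows per-class scores, and yolov8_postprocess turns that into box_count rows of [class_id, confidence, ymin, xmin, ymax, xmax] in relative coordinates:

import numpy as np

from frigate.detectors.util import yolov8_postprocess

num_classes, num_boxes = 80, 2100
fake_output = np.zeros((1, 4 + num_classes, num_boxes), dtype=np.float32)
# one confident "class 0" box centered at (160, 160) with size 64x64 on a 320x320 input
fake_output[0, 0:4, 0] = (160, 160, 64, 64)
fake_output[0, 4, 0] = 0.9

detections = yolov8_postprocess((1, 3, 320, 320), fake_output)
print(detections[0])  # [0, 0.9, 0.4, 0.4, 0.6, 0.6]; remaining rows are zero padding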