Nvidia Jetson ffmpeg + TensorRT support (#6458)

* Non-Jetson changes

Required for later commits:
- Allow base image to be overridden (and don't assume its WORKDIR)
- Ensure python3.9
- Map hwaccel decode presets as strings instead of lists (a short sketch follows below)
Not required:
- Fix existing documentation
- Simplify hwaccel scale logic
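
As a rough illustration of the preset change noted above (a hedged sketch; the preset name and value here are hypothetical, not taken from this commit), a decode preset stored as a single string can be spliced directly into an ffmpeg command line:

# Hypothetical string-valued preset; Frigate's real preset table lives in its Python sources.
HWACCEL_ARGS="-c:v h264_nvmpi"
# Word splitting on the unquoted variable expands the string into separate arguments.
ffmpeg -hide_banner $HWACCEL_ARGS -i rtsp://127.0.0.1:8554/cam -f null -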

* Prepare for multi-arch tensorrt build

* Add tensorrt images for Jetson boards

* Add Jetson ffmpeg hwaccel

* Update docs

* Add CODEOWNERS

* CI

* Change default model from yolov7-tiny-416 to yolov7-320

In my experience the tiny models perform markedly worse without being much faster.
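
The model list can still be overridden at runtime through the YOLO_MODELS environment variable this image honors; a minimal sketch, with the tag taken from the Makefile's local-trt target and the other flags illustrative:

# Ask the container to build the tiny model instead of the new default.
docker run --rm --gpus=all \
  -e YOLO_MODELS=yolov7-tiny-416 \
  -v /path/to/config:/config \
  frigate:latest-tensorrt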

* fixup! Update docs
Author: Andrew Reiter
Committed: 2023-07-26 06:50:41 -04:00 (via GitHub)
Commit: a96a951e23, parent: 680198148b
28 changed files with 567 additions and 139 deletions


@@ -3,11 +3,14 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive
-FROM debian:11 AS base
+ARG BASE_IMAGE=debian:11
+ARG SLIM_BASE=debian:11-slim
+FROM ${BASE_IMAGE} AS base
+FROM --platform=linux/amd64 debian:11 AS base_amd64
-FROM debian:11-slim AS slim-base
+FROM ${SLIM_BASE} AS slim-base
 FROM slim-base AS wget
 ARG DEBIAN_FRONTEND
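
The new build args above can be overridden directly; a hedged sketch (the JetPack base image value mirrors the Makefile added later in this commit):

docker buildx build -f docker/main/Dockerfile \
  --build-arg BASE_IMAGE=nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime \
  --build-arg SLIM_BASE=nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime \
  --platform linux/arm64 .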
@@ -123,8 +126,8 @@ RUN apt-get -qq update \
 && echo "deb http://deb.debian.org/debian bullseye main contrib non-free" | tee /etc/apt/sources.list.d/raspi.list \
 && apt-get -qq update \
 && apt-get -qq install -y \
-python3 \
-python3-dev \
+python3.9 \
+python3.9-dev \
 wget \
 # opencv dependencies
 build-essential cmake git pkg-config libgtk-3-dev \
@@ -137,14 +140,17 @@ RUN apt-get -qq update \
 gcc gfortran libopenblas-dev liblapack-dev && \
 rm -rf /var/lib/apt/lists/*
+# Ensure python3 defaults to python3.9
+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
 && python3 get-pip.py "pip"
 COPY docker/main/requirements.txt /requirements.txt
-RUN pip3 install -r requirements.txt
+RUN pip3 install -r /requirements.txt
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
-RUN pip3 wheel --wheel-dir=/wheels -r requirements-wheels.txt
+RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
 # Collect deps in a single layer
@@ -176,6 +182,7 @@ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
 /deps/install_deps.sh
 RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
+python3 -m pip install --upgrade pip && \
 pip3 install -U /deps/wheels/*.whl
 COPY --from=deps-rootfs / /


@@ -10,11 +10,15 @@ apt-get -qq install --no-install-recommends -y \
 wget \
 procps vainfo \
 unzip locales tzdata libxml2 xz-utils \
+python3.9 \
 python3-pip \
 curl \
 jq \
 nethogs
+# ensure python3 defaults to python3.9
+update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
 mkdir -p -m 600 /root/.gnupg
 # add coral repo
@@ -23,8 +27,10 @@ curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
 echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list
 echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
-# enable non-free repo
-sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
+# enable non-free repo in Debian
+if grep -q "Debian" /etc/issue; then
+    sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
+fi
 # coral drivers
 apt-get -qq update


@@ -9,28 +9,11 @@ ARG DEBIAN_FRONTEND
 ARG TARGETARCH
 # Add TensorRT wheels to another folder
-COPY docker/tensorrt/requirements.txt /requirements-tensorrt.txt
+COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
-# Build TensorRT-specific library
-FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps
-RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
-    /tensorrt_libyolo.sh
 # Frigate w/ TensorRT Support as separate image
-FROM deps AS frigate-tensorrt
-#Disable S6 Global timeout
-ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
+FROM tensorrt-base AS frigate-tensorrt
+ENV TRT_VER=8.5.3
-ENV YOLO_MODELS="yolov7-tiny-416"
-COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
-COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY docker/tensorrt/detector/rootfs/ /
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
     ldconfig


@@ -0,0 +1,79 @@
# syntax=docker/dockerfile:1.4
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS build-wheels
ARG DEBIAN_FRONTEND
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends \
python3.9 python3.9-dev \
wget build-essential cmake git \
&& rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip"
FROM build-wheels AS trt-wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
# python-tensorrt build deps are 3.4 GB!
RUN apt-get update \
&& apt-get install -y ccache cuda-cudart-dev-* cuda-nvcc-* libnvonnxparsers-dev libnvparsers-dev libnvinfer-plugin-dev \
&& ([ -e /usr/local/cuda ] || ln -s /usr/local/cuda-* /usr/local/cuda) \
&& rm -rf /var/lib/apt/lists/*;
# Determine version of tensorrt already installed in base image, e.g. "Version: 8.4.1-1+cuda11.4"
RUN NVINFER_VER=$(dpkg -s libnvinfer8 | grep -Po "Version: \K.*") \
&& echo $NVINFER_VER | grep -Po "^\d+\.\d+\.\d+" > /etc/TENSORRT_VER
RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,target=/deps/build_python_tensorrt.sh \
--mount=type=cache,target=/root/.ccache \
export PATH="/usr/lib/ccache:$PATH" CCACHE_DIR=/root/.ccache CCACHE_MAXSIZE=2G \
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND
RUN apt-get update \
&& apt-get install -y protobuf-compiler libprotobuf-dev \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
FROM wget AS jetson-ffmpeg
ARG DEBIAN_FRONTEND
ENV CCACHE_DIR /root/.ccache
ENV CCACHE_MAXSIZE 2G
RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps/build_jetson_ffmpeg.sh \
--mount=type=cache,target=/root/.ccache \
/deps/build_jetson_ffmpeg.sh
# Frigate w/ TensorRT for NVIDIA Jetson platforms
FROM tensorrt-base AS frigate-tensorrt
RUN apt-get update \
&& apt-get install -y python-is-python3 libprotobuf17 \
&& rm -rf /var/lib/apt/lists/*
RUN rm -rf /usr/lib/btbn-ffmpeg/
COPY --from=jetson-ffmpeg /rootfs /
COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
&& ldconfig
WORKDIR /opt/frigate/
COPY --from=rootfs / /
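
On a Jetson host, the image this stage produces must be started with the NVIDIA container runtime; the model-generation script later in this commit exits with an error otherwise. A minimal sketch (tag from the Makefile's local-trt-jp5 target, volume path illustrative):

docker run -d --runtime nvidia \
  -v /path/to/config:/config \
  frigate:latest-tensorrt-jp5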


@@ -0,0 +1,26 @@
# syntax=docker/dockerfile:1.4
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
# Build TensorRT-specific library
FROM ${TRT_BASE} AS trt-deps
RUN apt-get update \
&& apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
/tensorrt_libyolo.sh
# Frigate w/ TensorRT Support as separate image
FROM deps AS tensorrt-base
#Disable S6 Global timeout
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS="yolov7-320"


@@ -0,0 +1,59 @@
#!/bin/bash
# For jetson platforms, build ffmpeg with custom patches. NVIDIA supplies a deb
# with accelerated decoding, but it doesn't have accelerated scaling or encoding
set -euxo pipefail
INSTALL_PREFIX=/rootfs/usr/local
apt-get -qq update
apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config
apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev
pushd /tmp
# Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi)
if [ -e /usr/local/cuda-10.2 ]; then
# assume Jetpack 4.X
wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2
else
# assume Jetpack 5.X
wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2
fi
tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
wget -q https://github.com/madsciencetist/jetson-ffmpeg/archive/refs/heads/master.zip
unzip master.zip && rm master.zip && cd jetson-ffmpeg-master
LD_LIBRARY_PATH=$(pwd)/stubs:$LD_LIBRARY_PATH # tegra multimedia libs aren't available in image, so use stubs for ffmpeg build
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$INSTALL_PREFIX
make -j$(nproc)
make install
cd ../../
# Install nv-codec-headers to enable ffnvcodec filters (scale_cuda)
wget -q https://github.com/FFmpeg/nv-codec-headers/archive/refs/heads/master.zip
unzip master.zip && rm master.zip && cd nv-codec-headers-master
make PREFIX=$INSTALL_PREFIX install
cd ../ && rm -rf nv-codec-headers-master
# Build ffmpeg with nvmpi patch
wget -q https://ffmpeg.org/releases/ffmpeg-6.0.tar.xz
tar xaf ffmpeg-*.tar.xz && rm ffmpeg-*.tar.xz && cd ffmpeg-*
patch -p1 < ../jetson-ffmpeg-master/ffmpeg_patches/ffmpeg6.0_nvmpi.patch
export PKG_CONFIG_PATH=$INSTALL_PREFIX/lib/pkgconfig
# enable Jetson codecs but disable dGPU codecs
./configure --cc='ccache gcc' --cxx='ccache g++' \
--enable-shared --disable-static --prefix=$INSTALL_PREFIX \
--enable-gpl --enable-libx264 --enable-libx265 \
--enable-nvmpi --enable-ffnvcodec --enable-cuda-llvm \
--disable-cuvid --disable-nvenc --disable-nvdec \
|| { cat ffbuild/config.log && false; }
make -j$(nproc)
make install
cd ../
rm -rf /var/lib/apt/lists/*
popd
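
A quick hedged smoke test of the resulting build (the input URL is a placeholder): the nvmpi decoder and encoder enabled by the patch are selected explicitly with -c:v, while cuvid/nvenc/nvdec stay disabled per the configure flags above:

ffmpeg -c:v h264_nvmpi -i rtsp://127.0.0.1:8554/cam -f null -   # hardware decode
ffmpeg -i input.mp4 -c:v h264_nvmpi output.mp4                  # hardware encode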


@@ -0,0 +1,28 @@
#!/bin/bash
set -euxo pipefail
mkdir -p /trt-wheels
if [[ "${TARGETARCH}" == "arm64" ]]; then
# NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9,
# so we must build python-tensorrt ourselves.
# Get python-tensorrt source
mkdir /workspace
cd /workspace
git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1
# Collect dependencies
EXT_PATH=/workspace/external && mkdir -p $EXT_PATH
pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11
ln -s /usr/include/python3.9 $EXT_PATH/python3.9
ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/
# Build wheel
cd /workspace/TensorRT/python
EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh
mv build/dist/*.whl /trt-wheels/
fi


@@ -2,27 +2,35 @@
 # shellcheck shell=bash
 # Generate models for the TensorRT detector
 # One or more comma-separated models may be specified via the YOLO_MODELS env.
+# Append "-dla" to the model name to generate a DLA model with GPU fallback;
+# otherwise a GPU-only model will be generated.
 set -o errexit -o nounset -o pipefail
 MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
+TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
 OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
 # Create output folder
 mkdir -p ${OUTPUT_FOLDER}
 FIRST_MODEL=true
+MODEL_DOWNLOAD=""
 MODEL_CONVERT=""
 for model in ${YOLO_MODELS//,/ }
 do
+    # Remove old link in case path/version changed
+    rm -f ${MODEL_CACHE_DIR}/${model}.trt
     if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
         if [[ ${FIRST_MODEL} = true ]]; then
+            MODEL_DOWNLOAD="${model%-dla}";
             MODEL_CONVERT="${model}"
             FIRST_MODEL=false;
         else
+            MODEL_DOWNLOAD+=",${model%-dla}";
             MODEL_CONVERT+=",${model}";
         fi
     else
@@ -35,19 +43,49 @@ if [[ -z ${MODEL_CONVERT} ]]; then
 exit 0
 fi
+# On Jetpack 4.6, the nvidia container runtime will mount several host nvidia libraries into the
+# container which should not be present in the image - if they are, TRT model generation will
+# fail or produce invalid models. Thus we must request the user to install them on the host in
+# order to run libyolo here.
+# On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image.
+if [[ "$(arch)" == "aarch64" ]]; then
+    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then
+        echo "ERROR: Container must be launched with nvidia runtime"
+        exit 1
+    elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvparsers.so.8 ||
+            ! -e /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.8 ]]; then
+        echo "ERROR: Please run the following on the HOST:"
+        echo "  sudo apt install libnvinfer8 libnvinfer-plugin8 libnvparsers8 libnvonnxparsers8 nvidia-container"
+        exit 1
+    fi
+fi
 echo "Generating the following TRT Models: ${MODEL_CONVERT}"
 # Build trt engine
 cd /usr/local/src/tensorrt_demos/yolo
 # Download yolo weights
-./download_yolo.sh $MODEL_CONVERT > /dev/null
+echo "Downloading yolo weights"
+./download_yolo.sh $MODEL_DOWNLOAD 2> /dev/null
 for model in ${MODEL_CONVERT//,/ }
 do
-    echo "Converting ${model} model"
-    python3 yolo_to_onnx.py -m ${model} > /dev/null
-    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
-    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
+    python3 yolo_to_onnx.py -m ${model%-dla} > /dev/null
+    echo -e "\nGenerating ${model}.trt. This may take a few minutes.\n"; start=$(date +%s)
+    if [[ $model == *-dla ]]; then
+        cmd="python3 onnx_to_tensorrt.py -m ${model%-dla} --dla_core 0"
+    else
+        cmd="python3 onnx_to_tensorrt.py -m ${model}"
+    fi
+    $cmd > /tmp/onnx_to_tensorrt.log || { cat /tmp/onnx_to_tensorrt.log && continue; }
+    mv ${model%-dla}.trt ${OUTPUT_FOLDER}/${model}.trt;
+    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+    echo "Generated ${model}.trt in $(($(date +%s)-start)) seconds"
 done
 echo "Available tensorrt models:"
 cd ${OUTPUT_FOLDER} && ls *.trt;
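
Tying the script's conventions together, a hedged example of the environment value it consumes: a comma-separated list where a -dla suffix requests a DLA engine with GPU fallback, so the following builds one GPU-only engine and one DLA engine on container start:

export YOLO_MODELS="yolov7-320,yolov7-320-dla"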


@@ -8,7 +8,10 @@ SCRIPT_DIR="/usr/local/src/tensorrt_demos"
 git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
 # Build libyolo
-cd ./tensorrt_demos/plugins && make all
+if [ ! -e /usr/local/cuda ]; then
+    ln -s /usr/local/cuda-* /usr/local/cuda
+fi
+cd ./tensorrt_demos/plugins && make all -j$(nproc)
 cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
 # Store yolo scripts for later conversion


@@ -0,0 +1 @@
cuda-python == 11.7; platform_machine == 'aarch64'


@@ -0,0 +1,3 @@
onnx == 1.9.0; platform_machine == 'aarch64'
protobuf == 3.20.3; platform_machine == 'aarch64'
numpy == 1.23.*; platform_machine == 'aarch64'


@@ -1,19 +1,47 @@
+variable "ARCH" {
+  default = "amd64"
+}
+variable "BASE_IMAGE" {
+  default = null
+}
+variable "SLIM_BASE" {
+  default = null
+}
+variable "TRT_BASE" {
+  default = null
+}
+target "_build_args" {
+  args = {
+    BASE_IMAGE = BASE_IMAGE,
+    SLIM_BASE = SLIM_BASE,
+    TRT_BASE = TRT_BASE
+  }
+  platforms = ["linux/${ARCH}"]
+}
 target wget {
   dockerfile = "docker/main/Dockerfile"
   target = "wget"
+  inherits = ["_build_args"]
 }
 target deps {
   dockerfile = "docker/main/Dockerfile"
-  platforms = ["linux/amd64"]
   target = "deps"
+  inherits = ["_build_args"]
 }
 target rootfs {
   dockerfile = "docker/main/Dockerfile"
-  platforms = ["linux/amd64"]
   target = "rootfs"
+  inherits = ["_build_args"]
 }
 target wheels {
   dockerfile = "docker/main/Dockerfile"
-  platforms = ["linux/amd64"]
   target = "wheels"
+  inherits = ["_build_args"]
 }
 target devcontainer {
@@ -22,27 +50,45 @@ target devcontainer {
 target = "devcontainer"
 }
-target tensorrt {
-  dockerfile = "docker/tensorrt/Dockerfile"
+target "trt-deps" {
+  dockerfile = "docker/tensorrt/Dockerfile.base"
   context = "."
   contexts = {
     deps = "target:deps",
   }
+  inherits = ["_build_args"]
+}
+target "tensorrt-base" {
+  dockerfile = "docker/tensorrt/Dockerfile.base"
+  context = "."
+  contexts = {
+    deps = "target:deps",
+  }
+  inherits = ["_build_args"]
+}
+target "tensorrt" {
+  dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
+  context = "."
+  contexts = {
+    wget = "target:wget",
+    tensorrt-base = "target:tensorrt-base",
     rootfs = "target:rootfs"
     wheels = "target:wheels"
   }
-  platforms = ["linux/amd64"]
   target = "frigate-tensorrt"
+  inherits = ["_build_args"]
 }
-target devcontainer-trt {
-  dockerfile = "docker/tensorrt/Dockerfile"
+target "devcontainer-trt" {
+  dockerfile = "docker/tensorrt/Dockerfile.amd64"
   context = "."
   contexts = {
     deps = "target:deps",
     rootfs = "target:rootfs"
-    wheels = "target:wheels"
+    wheels = "target:wheels",
+    trt-deps = "target:trt-deps",
     devcontainer = "target:devcontainer"
   }
   platforms = ["linux/amd64"]
   target = "devcontainer-trt"
 }


@@ -1,10 +1,26 @@
 BOARDS += trt
+JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest  # L4T 32.7.1 JetPack 4.6.1
+JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime  # L4T 35.3.1 JetPack 5.1.1
+X86_DGPU_ARGS := ARCH=amd64
+JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE)
+JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)
 local-trt: version
-	docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
+local-trt-jp4: version
+	$(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt
+local-trt-jp5: version
+	$(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt
 build-trt:
-	docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
+	$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
+	$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
 push-trt: build-trt
-	docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
+	$(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
+	$(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
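
Assuming a checkout with buildx configured, the local targets above are then invoked as:

make local-trt       # x86 dGPU image  -> frigate:latest-tensorrt
make local-trt-jp4   # JetPack 4 image -> frigate:latest-tensorrt-jp4
make local-trt-jp5   # JetPack 5 image -> frigate:latest-tensorrt-jp5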