From 416da513027e60d82080c2c70f4d944e134183ca Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Tue, 31 Dec 2024 07:13:19 -0700
Subject: [PATCH] Update to trt 10

---
 docker/tensorrt/Dockerfile.base        | 17 +++++++++--------
 docker/tensorrt/requirements-amd64.txt | 10 +++-------
 2 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base
index f9cdde587..c03781cad 100644
--- a/docker/tensorrt/Dockerfile.base
+++ b/docker/tensorrt/Dockerfile.base
@@ -3,18 +3,19 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive
 
-ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
+ARG TRT_BASE=nvcr.io/nvidia/tensorrt:24.10-py3
 
 # Build TensorRT-specific library
 FROM ${TRT_BASE} AS trt-deps
 ARG COMPUTE_LEVEL
 
 
-RUN apt-get update \
-    && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
-    && rm -rf /var/lib/apt/lists/*
-RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
-    /tensorrt_libyolo.sh
+# Need to wait for script to be adapted to newer version of tensorrt or perhaps decide that we want to remove the TRT detector in favor of using onnx runtime directly
+#RUN apt-get update \
+#    && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
+#    && rm -rf /var/lib/apt/lists/*
+#RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+#    /tensorrt_libyolo.sh
 
 # Frigate w/ TensorRT Support as separate image
 FROM deps AS tensorrt-base
@@ -22,8 +23,8 @@ FROM deps AS tensorrt-base
 #Disable S6 Global timeout
 ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
 
-COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
-COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+#COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+#COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
 COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 ENV YOLO_MODELS=""
diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt
index fc919aaff..8ae9a1c87 100644
--- a/docker/tensorrt/requirements-amd64.txt
+++ b/docker/tensorrt/requirements-amd64.txt
@@ -1,14 +1,10 @@
 # NVidia TensorRT Support (amd64 only)
 --extra-index-url 'https://pypi.nvidia.com'
 numpy < 1.24; platform_machine == 'x86_64'
-tensorrt == 8.6.1.*; platform_machine == 'x86_64'
-cuda-python == 11.8.*; platform_machine == 'x86_64'
+tensorrt == 10.5.0; platform_machine == 'x86_64'
+cuda-python == 12.6.*; platform_machine == 'x86_64'
 cython == 3.0.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
-nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
-nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
-nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'