Upgrade TensorRT to 8.5.3 (#7006)

* Update to latest tensorrt (8.6.1) release

* Build trt libyolo_layer.so in container

* Update tensorrt_models script to convert models from the frigate container

* Fix typo in model script

* Fix paths to yolo lib and models folder

* Add S6 scripts to test and convert specified TensorRT models at startup.

Rearrange tensorrt files into a docker support folder.

* Update TensorRT documentation to reflect the new model conversion process and minimum hardware support (a usage sketch follows this list).

* Fix model_cache path to live in config directory

* Move tensorrt s6 files to the correct directory

* Fix issues in model generation script

* Disable global timeout for s6 services

* Add version folder to tensorrt model_cache path

* Include TensorRT version 8.5.3

* Pin the numpy requirement to a version prior to the removal of np.bool

* This TRT version uses a mixture of CUDA dependencies

* Redirect stdout from noisy model conversion
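The bullets above describe the new startup-time flow: the container itself now converts any engines named in the YOLO_MODELS environment variable before Frigate starts. A minimal usage sketch, assuming the stable-tensorrt image tag and --gpus flag (both illustrative; the YOLO_MODELS variable and the /config/model_cache location are taken from the scripts in this commit):

  docker run -d --name frigate \
    --gpus all \
    -e YOLO_MODELS="yolov4-tiny-416,yolov7-tiny-416" \
    -v /path/to/config:/config \
    ghcr.io/blakeblackshear/frigate:stable-tensorrt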
Author: Nate Meyer
Date: 2023-07-06 15:20:33 -04:00
Committed by: GitHub
Parent: 30dfdf47d4
Commit: dd02958f7c
14 changed files with 125 additions and 62 deletions


@@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
         libva-drm2 mesa-va-drivers
 fi
 
-apt-get purge gnupg apt-transport-https wget xz-utils -y
+apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
 rm -rf /var/lib/apt/lists/*


@@ -1,3 +1,4 @@
 /usr/local/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
+/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
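The entries above put the CUDA libraries that the TensorRT pip wheels pull in under dist-packages (cuDNN, the CUDA runtime, and cuBLAS) on the dynamic linker's search path; this is the "mixture of CUDA dependencies" noted in the commit message. A quick sanity check, assuming this file is installed as an ld.so.conf.d entry in the image:

  ldconfig -p | grep -E 'libcudnn|libcudart|libcublas'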


@@ -0,0 +1,53 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Generate models for the TensorRT detector

set -o errexit -o nounset -o pipefail

MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

FIRST_MODEL=true
MODEL_CONVERT=""

for model in ${YOLO_MODELS//,/ }
do
    # Remove old link in case path/version changed
    rm -f ${MODEL_CACHE_DIR}/${model}.trt

    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
        if [[ ${FIRST_MODEL} = true ]]; then
            MODEL_CONVERT="${model}"
            FIRST_MODEL=false;
        else
            MODEL_CONVERT+=",${model}";
        fi
    else
        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
    fi
done

if [[ -z ${MODEL_CONVERT} ]]; then
    echo "No models to convert."
    exit 0
fi

echo "Generating the following TRT Models: ${MODEL_CONVERT}"

# Build trt engine
cd /usr/local/src/tensorrt_demos/yolo

# Download yolo weights
./download_yolo.sh $MODEL_CONVERT > /dev/null

for model in ${MODEL_CONVERT//,/ }
do
    echo "Converting ${model} model"
    python3 yolo_to_onnx.py -m ${model} > /dev/null
    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
done
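Given the script above, a hypothetical first start with YOLO_MODELS=yolov7-tiny-416 on TensorRT 8.5.3 would leave the cache laid out like this (illustrative listing; the version folder comes from TRT_VER):

  ls -lR /config/model_cache/tensorrt
  # 8.5.3/yolov7-tiny-416.trt
  # yolov7-tiny-416.trt -> /config/model_cache/tensorrt/8.5.3/yolov7-tiny-416.trt

On later starts the engine already exists in the version folder, so only the symlink is refreshed and conversion runs once per model per TensorRT version.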


@@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/trt-model-prepare/run


@@ -0,0 +1,18 @@
#!/bin/bash

set -euxo pipefail

SCRIPT_DIR="/usr/local/src/tensorrt_demos"

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download

# Build libyolo
cd ./tensorrt_demos/plugins && make all
cp libyolo_layer.so /usr/local/lib/libyolo_layer.so

# Store yolo scripts for later conversion
cd ../
mkdir -p ${SCRIPT_DIR}/plugins
cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
cp -a yolo ${SCRIPT_DIR}/
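Note that the plugin lands in two places: on the runtime linker path (/usr/local/lib, covered by the ld.so.conf entries above) and next to the saved conversion scripts. A small sketch to confirm both copies inside the built image:

  ldd /usr/local/lib/libyolo_layer.so
  ls /usr/local/src/tensorrt_demos/plugins/libyolo_layer.so /usr/local/src/tensorrt_demos/yolo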


@@ -1,34 +0,0 @@
#!/bin/bash

set -euxo pipefail

CUDA_HOME=/usr/local/cuda
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
OUTPUT_FOLDER=/tensorrt_models
echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

# Install packages
pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos

# Build libyolo
cd /tensorrt_demos/plugins && make all
cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so

# Download yolo weights
cd /tensorrt_demos/yolo && ./download_yolo.sh

# Build trt engine
cd /tensorrt_demos/yolo
for model in ${YOLO_MODELS//,/ }
do
    python3 yolo_to_onnx.py -m ${model}
    python3 onnx_to_tensorrt.py -m ${model}
    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
done
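For contrast with the startup-time flow above, this removed script had to be run by hand in a separate one-shot container before Frigate could use TensorRT models, roughly as follows (an illustrative invocation; the NVIDIA image tag and mounts are assumptions, not part of this diff):

  docker run --gpus all --rm -it \
    -v "$(pwd)/trt-models:/tensorrt_models" \
    -v "$(pwd)/tensorrt_models.sh:/tensorrt_models.sh" \
    nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh

The new s6 service replaces this manual step with a cached conversion inside the Frigate container itself.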