Upgrade TensorRT to 8.5.3 (#7006)

* Update to latest TensorRT (8.6.1) release

* Build the TRT libyolo_layer.so in the container

* Update tensorrt_models script to convert models from the Frigate container

* Fix typo in model script

* Fix paths to yolo lib and models folder

* Add S6 scripts to test and convert specified TensorRT models at startup.

Rearrange tensorrt files into a docker support folder.

* Update TensorRT documentation to reflect the new model conversion process and minimum HW support.

* Fix model_cache path to live in config directory

* Move tensorrt s6 files to the correct directory

* Fix issues in model generation script

* Disable global timeout for s6 services

* Add version folder to tensorrt model_cache path

* Include TensorRT version 8.5.3

* Pin the numpy requirement to a version prior to the removal of np.bool

* This TRT version uses a mixture of CUDA dependencies

* Redirect stdout from noisy model conversion
Nate Meyer
2023-07-06 15:20:33 -04:00
committed by GitHub
parent 30dfdf47d4
commit dd02958f7c
14 changed files with 125 additions and 62 deletions
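Taken together, these changes move TensorRT engine generation into the Frigate container at startup. As a minimal sketch of the new flow (the image name and model tag below are illustrative assumptions, not part of this diff), the container is pointed at a config volume and told which models to build via the YOLO_MODELS environment variable read by the scripts below:

docker run -d \
  -e YOLO_MODELS=yolov7-tiny-416 \
  -v /path/to/config:/config \
  frigate-tensorrt-image   # image name assumed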


@@ -0,0 +1,6 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.9/dist-packages/tensorrt
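The path list above appears to be an ld.so.conf.d drop-in that lets the dynamic linker resolve the pip-installed CUDA and TensorRT libraries at runtime. A sketch of how such a file typically takes effect (the source and destination filenames here are assumptions):

# install the path list as a linker config drop-in and refresh the cache
install -m 0644 cuda-paths.conf /etc/ld.so.conf.d/cuda-tensorrt.conf  # filenames assumed
ldconfig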


@@ -0,0 +1,53 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Generate models for the TensorRT detector

set -o errexit -o nounset -o pipefail

MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"

# Create the versioned output folder
mkdir -p "${OUTPUT_FOLDER}"

# YOLO_MODELS is a comma-separated list supplied via the container environment
FIRST_MODEL=true
MODEL_CONVERT=""
for model in ${YOLO_MODELS//,/ }
do
    # Remove the old link in case the path/version changed
    rm -f "${MODEL_CACHE_DIR}/${model}.trt"

    if [[ ! -f "${OUTPUT_FOLDER}/${model}.trt" ]]; then
        # No engine exists for this TRT version yet; queue the model for conversion
        if [[ ${FIRST_MODEL} = true ]]; then
            MODEL_CONVERT="${model}"
            FIRST_MODEL=false
        else
            MODEL_CONVERT+=",${model}"
        fi
    else
        # Engine already exists for this version; just restore the link
        ln -s "${OUTPUT_FOLDER}/${model}.trt" "${MODEL_CACHE_DIR}/${model}.trt"
    fi
done

if [[ -z ${MODEL_CONVERT} ]]; then
    echo "No models to convert."
    exit 0
fi

echo "Generating the following TRT Models: ${MODEL_CONVERT}"

# Build the TRT engines
cd /usr/local/src/tensorrt_demos/yolo

# Download the yolo weights
./download_yolo.sh "${MODEL_CONVERT}" > /dev/null

for model in ${MODEL_CONVERT//,/ }
do
    echo "Converting ${model} model"
    python3 yolo_to_onnx.py -m "${model}" > /dev/null
    python3 onnx_to_tensorrt.py -m "${model}" > /dev/null
    cp "${model}.trt" "${OUTPUT_FOLDER}/${model}.trt"
    ln -s "${OUTPUT_FOLDER}/${model}.trt" "${MODEL_CACHE_DIR}/${model}.trt"
done
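For illustration, the cache layout the script above produces, assuming YOLO_MODELS=yolov7-tiny-416 and TRT_VER=8.5.3 (the model name is hypothetical):

/config/model_cache/tensorrt/8.5.3/yolov7-tiny-416.trt   # converted engine, kept per TRT version
/config/model_cache/tensorrt/yolov7-tiny-416.trt         # symlink into the versioned folder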


@@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
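This one-line file is presumably the s6-rc "up" file for the trt-model-prepare oneshot, pointing at the run script above. For context, a typical s6-overlay oneshot is wired up along these lines (the type file and its contents are assumptions based on s6-rc conventions, not shown in this diff):

/etc/s6-overlay/s6-rc.d/trt-model-prepare/type   # contains: oneshot
/etc/s6-overlay/s6-rc.d/trt-model-prepare/up     # contains the line above
/etc/s6-overlay/s6-rc.d/trt-model-prepare/run    # the model generation script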


@@ -0,0 +1,18 @@
#!/bin/bash

set -euxo pipefail

SCRIPT_DIR="/usr/local/src/tensorrt_demos"

# Clone the tensorrt_demos repo
git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download

# Build libyolo
cd ./tensorrt_demos/plugins && make all
cp libyolo_layer.so /usr/local/lib/libyolo_layer.so

# Store the yolo scripts for later conversion
cd ../
mkdir -p "${SCRIPT_DIR}/plugins"
cp plugins/libyolo_layer.so "${SCRIPT_DIR}/plugins/libyolo_layer.so"
cp -a yolo "${SCRIPT_DIR}/"
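A quick way to sanity-check this build step in isolation might be the following (paths taken from the script above; the symbol spot check is an assumption about what the plugin exports):

# confirm the plugin library landed in both locations
ls -l /usr/local/lib/libyolo_layer.so /usr/local/src/tensorrt_demos/plugins/libyolo_layer.so
# spot-check the exported symbols
nm -D /usr/local/lib/libyolo_layer.so | head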