Compare commits


1 Commit

Author: dependabot[bot]
SHA1: c71bb53e8b
Message: Update uvicorn requirement from ==0.30.* to ==0.34.* in /docker/main
Updates the requirements on [uvicorn](https://github.com/encode/uvicorn) to permit the latest version.
- [Release notes](https://github.com/encode/uvicorn/releases)
- [Changelog](https://github.com/encode/uvicorn/blob/master/CHANGELOG.md)
- [Commits](https://github.com/encode/uvicorn/compare/0.30.0...0.34.0)

---
updated-dependencies:
- dependency-name: uvicorn
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2024-12-16 12:06:49 +00:00
148 changed files with 2834 additions and 9267 deletions

View File

@@ -2,7 +2,6 @@ aarch
absdiff
airockchip
Alloc
alpr
Amcrest
amdgpu
analyzeduration
@@ -62,7 +61,6 @@ dsize
dtype
ECONNRESET
edgetpu
facenet
fastapi
faststart
fflags
@@ -116,8 +114,6 @@ itemsize
Jellyfin
jetson
jetsons
jina
jinaai
joserfc
jsmpeg
jsonify
@@ -191,7 +187,6 @@ openai
opencv
openvino
OWASP
paddleocr
paho
passwordless
popleft
@@ -313,4 +308,4 @@ yolo
yolonas
yolox
zeep
zerolatency
zerolatency

View File

@@ -75,6 +75,15 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push Rockchip build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
name: Jetson Jetpack 4

View File

@@ -6,7 +6,7 @@ on:
- "docs/**"
env:
DEFAULT_PYTHON: 3.11
DEFAULT_PYTHON: 3.9
jobs:
build_devcontainer:
@@ -76,7 +76,7 @@ jobs:
with:
persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.3.0
uses: actions/setup-python@v5.1.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements

View File

@@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.16.0
VERSION = 0.15.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty

View File

@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
object_detector.cleanup()
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######

View File

@@ -5,7 +5,6 @@ ARG DEBIAN_FRONTEND=noninteractive
# Build Python wheels
FROM wheels AS h8l-wheels
RUN python3 -m pip config set global.break-system-packages true
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt
@@ -31,7 +30,6 @@ COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /
# Install the wheels
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl

View File

@@ -2,7 +2,7 @@
set -euxo pipefail
hailo_version="4.20.0"
hailo_version="4.19.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
@@ -15,5 +15,5 @@ wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_ver
mkdir -p /hailo-wheels
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"

View File

@@ -1,12 +1,12 @@
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
appdirs==1.4.4
argcomplete==2.0.0
contextlib2==0.6.0.post1
distlib==0.3.6
filelock==3.8.0
future==0.18.2
importlib-metadata==5.1.0
importlib-resources==5.1.2
netaddr==0.8.0
netifaces==0.10.9
verboselogs==1.7
virtualenv==20.17.0

View File

@@ -4,7 +4,6 @@
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget
hailo_version="4.20.0"
arch=$(uname -m)
if [[ $arch == "x86_64" ]]; then
@@ -14,7 +13,7 @@ else
fi
# Clone the HailoRT driver repository
git clone --depth 1 --branch v${hailo_version} https://github.com/hailo-ai/hailort-drivers.git
git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
# Build and install the HailoRT driver
cd hailort-drivers/linux/pcie

View File

@@ -3,12 +3,12 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
ARG BASE_IMAGE=debian:11
ARG SLIM_BASE=debian:11-slim
FROM ${BASE_IMAGE} AS base
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
FROM --platform=${BUILDPLATFORM} debian:11 AS base_host
FROM ${SLIM_BASE} AS slim-base
@@ -66,8 +66,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" --break-system-packages \
&& pip install --break-system-packages -r /requirements-ov.txt
&& python3 get-pip.py "pip" \
&& pip install -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@@ -139,17 +139,24 @@ ARG TARGETARCH
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y \
apt-transport-https wget \
apt-transport-https \
gnupg \
wget \
# the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
&& wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3 \
python3-dev \
python3.9 \
python3.9-dev \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libssl-dev\
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# sqlite3 dependencies
tclsh \
@@ -157,11 +164,14 @@ RUN apt-get -qq update \
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" --break-system-packages
&& python3 get-pip.py "pip"
COPY docker/main/requirements.txt /requirements.txt
RUN pip3 install -r /requirements.txt --break-system-packages
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@@ -212,8 +222,8 @@ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_de
/deps/install_deps.sh
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip --break-system-packages && \
pip3 install -U /deps/wheels/*.whl --break-system-packages
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
COPY --from=deps-rootfs / /
@@ -260,7 +270,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt --break-system-packages
pip3 install -r requirements-dev.txt
HEALTHCHECK NONE

View File

@@ -8,7 +8,8 @@ SECURE_TOKEN_MODULE_VERSION="1.5"
SET_MISC_MODULE_VERSION="v0.33"
NGX_DEVEL_KIT_VERSION="v0.3.3"
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
apt-get update
apt-get -yqq build-dep nginx

View File

@@ -4,7 +4,7 @@ from openvino.tools import mo
ov_model = mo.convert_model(
"/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb",
compress_to_fp16=True,
transformations_config="/usr/local/lib/python3.11/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config",
reverse_input_channels=True,
)

View File

@@ -4,7 +4,8 @@ set -euxo pipefail
SQLITE_VEC_VERSION="0.1.3"
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
apt-get update
apt-get -yqq build-dep sqlite3 gettext git

View File

@@ -11,34 +11,33 @@ apt-get -qq install --no-install-recommends -y \
lbzip2 \
procps vainfo \
unzip locales tzdata libxml2 xz-utils \
python3 \
python3.9 \
python3-pip \
curl \
lsof \
jq \
nethogs \
libgl1 \
libglib2.0-0 \
libusb-1.0.0
nethogs
# ensure python3 defaults to python3.9
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
mkdir -p -m 600 /root/.gnupg
# install coral runtime
wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb
# add coral repo
curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
gpg --dearmor -o /etc/apt/trusted.gpg.d/google-cloud-packages-archive-keyring.gpg
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list
echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
# install python3 & tflite runtime
if [[ "${TARGETARCH}" == "amd64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
# enable non-free repo in Debian
if grep -q "Debian" /etc/issue; then
sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
fi
# coral drivers
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libedgetpu1-max python3-tflite-runtime python3-pycoral
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
@@ -66,15 +65,23 @@ fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# install amd / intel-i965 driver packages
# use debian bookworm for amd / intel-i965 driver packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver intel-gpu-tools onevpl-tools \
libva-drm2 \
mesa-va-drivers radeontop
# something about this dependency requires it to be installed in a separate call rather than in the line above
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver-shaders
# intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use intel apt intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list

View File

@@ -4,16 +4,16 @@ aiohttp == 3.11.2
starlette == 0.41.2
starlette-context == 0.3.6
fastapi == 0.115.*
uvicorn == 0.30.*
uvicorn == 0.34.*
slowapi == 0.1.*
imutils == 0.5.*
joserfc == 1.0.*
pathvalidate == 3.2.*
markupsafe == 2.1.*
python-multipart == 0.0.12
# General
mypy == 1.6.1
onvif-zeep-async == 3.1.*
numpy == 1.26.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.9.0.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
@@ -27,19 +27,15 @@ ruamel.yaml == 0.18.*
tzlocal == 5.2
requests == 2.32.*
types-requests == 2.32.*
scipy == 1.13.*
norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
# Image Manipulation
numpy == 1.26.*
opencv-python-headless == 4.10.0.*
opencv-contrib-python == 4.9.0.*
scipy == 1.14.*
# OpenVino & ONNX
openvino == 2024.4.*
onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
onnxruntime == 1.20.* ; platform_machine == 'aarch64'
openvino == 2024.3.*
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'
# Embeddings
transformers == 4.45.*
# Generative AI
@@ -49,6 +45,3 @@ openai == 1.51.*
# push notifications
py-vapid == 1.9.*
pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*

View File

@@ -1,2 +1,2 @@
scikit-build == 0.18.*
scikit-build == 0.17.*
nvidia-pyindex

View File

@@ -81,9 +81,6 @@ http {
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 10M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;

View File

@@ -1,20 +0,0 @@
./subset/000000005001.jpg
./subset/000000038829.jpg
./subset/000000052891.jpg
./subset/000000075612.jpg
./subset/000000098261.jpg
./subset/000000181542.jpg
./subset/000000215245.jpg
./subset/000000277005.jpg
./subset/000000288685.jpg
./subset/000000301421.jpg
./subset/000000334371.jpg
./subset/000000348481.jpg
./subset/000000373353.jpg
./subset/000000397681.jpg
./subset/000000414673.jpg
./subset/000000419312.jpg
./subset/000000465822.jpg
./subset/000000475732.jpg
./subset/000000559707.jpg
./subset/000000574315.jpg

20 binary image files deleted (14 KiB to 281 KiB each); contents not shown in the diff.

View File

@@ -7,26 +7,21 @@ FROM wheels as rk-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-*
FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages
pip3 install -U /deps/rk-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rockchip/COCO /COCO
COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffprobe /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@@ -1,82 +0,0 @@
import os
import rknn
import yaml
from rknn.api import RKNN
try:
with open(rknn.__path__[0] + "/VERSION") as file:
tk_version = file.read().strip()
except FileNotFoundError:
pass
try:
with open("/config/conv2rknn.yaml", "r") as config_file:
configuration = yaml.safe_load(config_file)
except FileNotFoundError:
raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
if configuration["config"] != None:
rknn_config = configuration["config"]
else:
rknn_config = {}
if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
raise Exception(
"Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
)
if "soc" not in configuration:
try:
with open("/proc/device-tree/compatible") as file:
soc = file.read().split(",")[-1].strip("\x00")
except FileNotFoundError:
raise Exception("Make sure to run docker in privileged mode.")
configuration["soc"] = [
soc,
]
if "quantization" not in configuration:
configuration["quantization"] = False
if "output_name" not in configuration:
configuration["output_name"] = "{{input_basename}}"
for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
for soc in configuration["soc"]:
quant = "i8" if configuration["quantization"] else "fp16"
input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
input_basename = input_filename[: input_filename.rfind(".")]
output_filename = (
configuration["output_name"].format(
quant=quant,
input_basename=input_basename,
soc=soc,
tk_version=tk_version,
)
+ ".rknn"
)
output_path = "/config/model_cache/rknn_cache/" + output_filename
rknn_config["target_platform"] = soc
rknn = RKNN(verbose=True)
rknn.config(**rknn_config)
if rknn.load_onnx(model=input_path) != 0:
raise Exception("Error loading model.")
if (
rknn.build(
do_quantization=configuration["quantization"],
dataset="/COCO/coco_subset_20.txt",
)
!= 0
):
raise Exception("Error building model.")
if rknn.export_rknn(output_path) != 0:
raise Exception("Error exporting rknn model.")

View File

@@ -1,2 +1 @@
rknn-toolkit2 == 2.3.0
rknn-toolkit-lite2 == 2.3.0
rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl

View File

@@ -34,7 +34,7 @@ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:12 as debian-base
FROM --platform=linux/amd64 debian:11 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
@@ -51,7 +51,7 @@ COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3-distutils python3-dev
RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev
WORKDIR /opt/build
@@ -70,11 +70,10 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
# Temporarily disabled to see if a new wheel can be built to support py3.11
#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
#RUN python3 -m pip install --upgrade pip \
# && pip3 uninstall -y onnxruntime-openvino \
# && pip3 install -r /requirements.txt
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
#######################################################################
FROM scratch AS rocm-dist
@@ -87,12 +86,12 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
\
ENV HSA_ENABLE_SDMA=0
ENV HSA_ENABLE_SDMA=0
COPY --from=rocm-dist / /

View File

@@ -24,7 +24,7 @@ sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
if [[ "${TARGETARCH}" == "arm64" ]]; then
# add raspberry pi repo
gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
fi

View File

@@ -7,19 +7,33 @@ ARG DEBIAN_FRONTEND=noninteractive
FROM wheels as trt-wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
RUN python3 -m pip config set global.break-system-packages true
# Add TensorRT wheels to another folder
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.6.1
RUN python3 -m pip config set global.break-system-packages true
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
ldconfig
# Build CuDNN
FROM wget AS cudnn-deps
ARG COMPUTE_LEVEL
RUN apt-get update \
&& apt-get install -y git build-essential
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
&& dpkg -i cuda-keyring_1.1-1_all.deb \
&& apt-get update \
&& apt-get -y install cuda-toolkit \
&& rm -rf /var/lib/apt/lists/*
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@@ -28,8 +42,8 @@ FROM devcontainer AS devcontainer-trt
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages
pip3 install -U /deps/trt-wheels/*.whl

View File

@@ -41,11 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND

View File

@@ -3,7 +3,7 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
# Build TensorRT-specific library
FROM ${TRT_BASE} AS trt-deps
@@ -24,7 +24,6 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS=""

View File

@@ -1,8 +1,6 @@
/usr/local/lib
/usr/local/cuda/lib64
/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.11/dist-packages/tensorrt
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.9/dist-packages/tensorrt

View File

@@ -1,9 +1,9 @@
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
tensorrt == 8.6.1.*; platform_machine == 'x86_64'
cuda-python == 11.8.*; platform_machine == 'x86_64'
cython == 3.0.*; platform_machine == 'x86_64'
tensorrt == 8.5.3.*; platform_machine == 'x86_64'
cuda-python == 11.8; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'

View File

@@ -41,7 +41,6 @@ cameras:
...
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -50,8 +49,6 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:

View File

@@ -67,15 +67,14 @@ ffmpeg:
### Annke C800
This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone) the H.265 stream has to be adjusted using the `apple_compatibility` config.
This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately, direct playback in the browser is not working (yet), but the downloaded clip can be played locally.
```yaml
cameras:
annkec800: # <------ Name the camera
ffmpeg:
apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
output_args:
record: preset-record-generic-audio-aac
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
inputs:
- path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
@@ -157,9 +156,7 @@ cameras:
#### Reolink Doorbell
The Reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
The Reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
```yaml
go2rtc:

View File

@@ -1,35 +0,0 @@
---
id: face_recognition
title: Face Recognition
---
Face recognition allows people to be assigned names, and when their face is recognized, Frigate will assign the person's name as a sub label. This information is included in the UI, in filters, and in notifications.
Frigate has support for FaceNet, which runs locally, to create face embeddings. Embeddings are then saved to Frigate's database.
## Minimum System Requirements
Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
## Configuration
Face recognition is disabled by default and requires Semantic Search to be enabled; face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings.
```yaml
face_recognition:
enabled: true
```
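Because face recognition depends on Semantic Search, a minimal combined sketch (using the `semantic_search` and `face_recognition` options that appear in the reference config) looks like this:

```yaml
semantic_search:
  # small model runs on CPU, large model runs on GPU
  enabled: true
  model_size: "small"

face_recognition:
  enabled: true
  model_size: "small"
```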
## Dataset
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
- Complexity of the task: A simple task like recognizing faces of known individuals may require fewer images than a complex task like identifying unknown individuals in a large crowd.
- Diversity of the dataset: A dataset with diverse images, including variations in lighting, pose, and facial expressions, will require fewer images per person than a less diverse dataset.
- Desired accuracy: The higher the desired accuracy, the more images are typically needed.
However, here are some general guidelines:
- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.

View File

@@ -5,8 +5,6 @@ title: Generative AI
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
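As a rough sketch, pointing Frigate at a provider is done in the global `genai` config; the provider, URL, and model below are illustrative placeholders rather than required values:

```yaml
genai:
  enabled: true
  provider: ollama                  # placeholder; other providers are supported
  base_url: http://localhost:11434  # placeholder endpoint for the chosen provider
  model: llava                      # placeholder model name
```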
:::info
Semantic Search must be enabled to use Generative AI.

View File

@@ -175,16 +175,6 @@ For more information on the various values across different distributions, see h
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
#### Stats for SR-IOV devices
When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
```yaml
telemetry:
stats:
sriov: True
```
## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

View File

@@ -203,13 +203,14 @@ detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:

View File

@@ -1,45 +0,0 @@
---
id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
Users running a Frigate+ model should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
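A minimal sketch of adding `license_plate` to the tracked objects, either globally or under a specific camera:

```yaml
objects:
  track:
    - car
    - license_plate
```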
LPR is most effective when the vehicle's license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining its detection and keeping the most confident result. LPR will not run on stationary vehicles.
## Minimum System Requirements
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required.
## Configuration
License plate recognition is disabled by default. Enable it in your config file:
```yaml
lpr:
enabled: true
```
## Advanced Configuration
Several options are available to fine-tune the LPR feature. For example, you can adjust the `min_area` setting, which defines the minimum size in pixels a license plate must be before LPR runs. The default is 500 pixels.
Additionally, you can define `known_plates` as strings or regular expressions, allowing Frigate to label tracked vehicles with custom sub_labels when a recognized plate is detected. This information is then accessible in the UI, filters, and notifications.
```yaml
lpr:
enabled: true
min_area: 500
known_plates:
Wife's Car:
- "ABC-1234"
- "ABC-I234"
Johnny:
- "J*N-*234" # Using wildcards for H/M and 1/I
Sally:
- "[S5]LL-1234" # Matches SLL-1234 and 5LL-1234
```
In this example, "Wife's Car" will appear as the label for any vehicle matching the plate "ABC-1234." The model might occasionally interpret the digit 1 as a capital I (e.g., "ABC-I234"), so both variations are listed. Similarly, multiple possible variations are specified for Johnny and Sally.

View File

@@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi
### Audio Support
MSE requires PCMA/PCMU or AAC audio; WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
MSE requires AAC audio; WebRTC requires PCMU/PCMA or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
@@ -138,13 +138,3 @@ services:
:::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
### Two way talk
For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
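For the first bullet above, the WebRTC part of the go2rtc setup is typically along these lines (the LAN address is a placeholder for your host's IP):

```yaml
go2rtc:
  webrtc:
    candidates:
      - 192.168.1.10:8555  # placeholder: LAN IP of the host running Frigate
      - stun:8555
```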

View File

@@ -144,9 +144,7 @@ detectors:
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
```yaml
detectors:
@@ -256,7 +254,6 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@@ -285,8 +282,6 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
@@ -506,12 +501,11 @@ detectors:
cpu1:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@@ -550,7 +544,7 @@ Hardware accelerated object detection is supported on the following SoCs:
- RK3576
- RK3588
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.0.0.beta0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.
### Prerequisites
@@ -623,41 +617,7 @@ $ cat /sys/kernel/debug/rknpu/load
:::
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2`. Note that there is only post-processing for the supported models.
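As a sketch of that second point, a custom model stored directly under `model_cache` could be referenced like this (the filename and input dimensions are placeholders and depend on your model):

```yaml
detectors:
  rknn:
    type: rknn

model:
  path: /config/model_cache/my_custom_model.rknn  # placeholder filename
  width: 320
  height: 320
```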
### Converting your own onnx model to rknn format
To convert a onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.
This is an example configuration file that you need to adjust to your specific onnx model:
```yaml
soc: ["rk3562","rk3566", "rk3568", "rk3576", "rk3588"]
quantization: false
output_name: "{input_basename}"
config:
mean_values: [[0, 0, 0]]
std_values: [[255, 255, 255]]
quant_img_rgb2bgr: true
```
Explanation of the parameters:
- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
- `quantization`: true: 8 bit integer (i8) quantization, false: 16 bit float (fp16). Default: false.
- `output_name`: The output name of the model. The following variables are available:
- `quant`: "i8" or "fp16" depending on the config
- `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
- `soc`: the SoC this model was build for (e.g. "rk3588")
- `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires an x86 machine). Note that there is only post-processing for the supported models.
## Hailo-8l
@@ -672,6 +632,8 @@ detectors:
hailo8l:
type: hailo8l
device: PCIe
model:
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
model:
width: 300
@@ -679,5 +641,4 @@ model:
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```

View File

@@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@@ -117,27 +117,25 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Required: path to the model (default: automatic based on detector)
# Optional: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Required: path to the labelmap (default: shown below)
# Optional: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Required: Object detection model input colorspace
# Optional: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Required: Object detection model input tensor format
# Optional: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Required: Object detection model type, currently only used with the OpenVINO detector
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
# Required: Label name modifications. These are merged into the standard labelmap.
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)
@@ -244,8 +242,6 @@ ffmpeg:
# If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
# NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
retry_interval: 10
# Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
apple_compatibility: false
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@@ -526,14 +522,6 @@ semantic_search:
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for face recognition capability
face_recognition:
# Optional: Enable semantic search (default: shown below)
enabled: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
@@ -698,7 +686,6 @@ cameras:
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -707,8 +694,6 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
ignore_time_mismatch: False
@@ -772,8 +757,6 @@ cameras:
- cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: []
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False
# Optional
ui:
@@ -815,13 +798,11 @@ telemetry:
- lo
# Optional: Configure system stats
stats:
# Optional: Enable AMD GPU stats (default: shown below)
# Enable AMD GPU stats (default: shown below)
amd_gpu_stats: True
# Optional: Enable Intel GPU stats (default: shown below)
# Enable Intel GPU stats (default: shown below)
intel_gpu_stats: True
# Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
sriov: False
# Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)

View File

@@ -305,15 +305,8 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct), it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
:::warning
If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation; Frigate does not officially support running inside of an LXC.
:::
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`

View File

@@ -3,15 +3,7 @@ id: recordings
title: Troubleshooting Recordings
---
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
You'll want to:
- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
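For the first point, a motion mask over the camera's timestamp is just a polygon in that camera's motion config; a minimal sketch (camera name and coordinates are placeholders for your own layout):

```yaml
cameras:
  front_door:
    motion:
      mask:
        - "0,0,400,0,400,40,0,40"  # placeholder box covering the timestamp overlay
```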
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
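Enabling that debug logging is done through the logger config; a minimal sketch, assuming the recording maintainer lives under Frigate's `frigate.record` logger namespace:

```yaml
logger:
  logs:
    frigate.record.maintainer: debug  # assumed module name for recording maintenance logs
```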
@@ -48,7 +40,6 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern Linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to be the same and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
@@ -63,7 +54,6 @@ services:
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```

docs/package-lock.json (generated): 7061 lines changed; file diff suppressed because it is too large.

View File

@@ -17,15 +17,15 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.6.3",
"@docusaurus/preset-classic": "^3.6.3",
"@docusaurus/theme-mermaid": "^3.6.3",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@mdx-js/react": "^3.1.0",
"@docusaurus/core": "^3.5.2",
"@docusaurus/preset-classic": "^3.5.2",
"@docusaurus/theme-mermaid": "^3.5.2",
"@docusaurus/plugin-content-docs": "^3.5.2",
"@mdx-js/react": "^3.0.1",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.3.1",
"docusaurus-theme-openapi-docs": "^4.3.1",
"prism-react-renderer": "^2.4.1",
"docusaurus-plugin-openapi-docs": "^4.1.0",
"docusaurus-theme-openapi-docs": "^4.1.0",
"prism-react-renderer": "^2.4.0",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
"react-dom": "^18.3.1"

View File

@@ -36,8 +36,6 @@ const sidebars: SidebarsConfig = {
'Semantic Search': [
'configuration/semantic_search',
'configuration/genai',
'configuration/face_recognition',
'configuration/license_plate_recognition',
],
Cameras: [
'configuration/cameras',

View File

@@ -3,15 +3,12 @@ import faulthandler
import signal
import sys
import threading
from typing import Union
import ruamel.yaml
from pydantic import ValidationError
from frigate.app import FrigateApp
from frigate.config import FrigateConfig
from frigate.log import setup_logging
from frigate.util.config import find_config_file
def main() -> None:
@@ -45,51 +42,10 @@ def main() -> None:
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************\n")
# Attempt to get the original config file for line number tracking
config_path = find_config_file()
with open(config_path, "r") as f:
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(f)
print("*************************************************************")
for error in e.errors():
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
for i, part in enumerate(error_path):
key: Union[int, str] = (
int(part) if isinstance(part, str) and part.isdigit() else part
)
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
if isinstance(key, int):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception as traverse_error:
print(f"Could not determine exact line number: {traverse_error}")
if current != full_config:
print(f"Line # : {line_number}")
print(f"Key : {' -> '.join(map(str, error_path))}")
print(f"Value : {error.get('input', '-')}")
print(f"Message : {error.get('msg', error.get('type', 'Unknown'))}\n")
location = ".".join(str(item) for item in error["loc"])
print(f"{location}: {error['msg']}")
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")

View File

@@ -7,18 +7,15 @@ import os
import traceback
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO
from typing import Any, Optional
import requests
import ruamel.yaml
from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse
from markupsafe import escape
from peewee import operator
from pydantic import ValidationError
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
@@ -142,8 +139,6 @@ def config(request: Request):
mode="json", warnings="none", exclude_none=True
)
for stream_name, stream in go2rtc.get("streams", {}).items():
if stream is None:
continue
if isinstance(stream, str):
cleaned = clean_camera_user_pass(stream)
else:
@@ -188,6 +183,7 @@ def config_raw():
@router.post("/config/save")
def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
new_config = body.decode()
if not new_config:
return JSONResponse(
content=(
@@ -198,64 +194,13 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
# Validate the config schema
try:
# Use ruamel to parse and preserve line numbers
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(StringIO(new_config))
FrigateConfig.parse_yaml(new_config)
except ValidationError as e:
error_message = []
for error in e.errors():
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
for i, part in enumerate(error_path):
key = int(part) if part.isdigit() else part
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception:
line_number = "Unable to determine"
error_message.append(
f"Line {line_number}: {' -> '.join(map(str, error_path))} - {error.get('msg', error.get('type', 'Unknown'))}"
)
return JSONResponse(
content=(
{
"success": False,
"message": "Your configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n"
+ "\n".join(error_message),
}
),
status_code=400,
)
except Exception:
return JSONResponse(
content=(
{
"success": False,
"message": f"\nYour configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n{escape(str(traceback.format_exc()))}",
"message": f"\nConfig Error:\n\n{escape(str(traceback.format_exc()))}",
}
),
status_code=400,
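
A minimal client-side sketch for the /config/save endpoint shown above. The host/port, the "/api" prefix, and the "saveonly" value for save_option are assumptions for illustration; the route itself takes the raw YAML as a text/plain body and, on validation failure, returns a JSON body with "success" and "message" fields.

import requests

# Hypothetical example: submit a raw YAML config for validation/saving.
new_yaml = """
mqtt:
  enabled: false
cameras: {}
"""

resp = requests.post(
    "http://127.0.0.1:5000/api/config/save",
    params={"save_option": "saveonly"},  # assumed value, passed as a query parameter
    data=new_yaml.encode(),
    headers={"Content-Type": "text/plain"},
)

if resp.status_code == 400:
    # Validation failures come back as {"success": false, "message": "..."}
    print(resp.json()["message"])
else:
    print("Config accepted")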

View File

@@ -1,127 +0,0 @@
"""Object classification APIs."""
import logging
import os
import random
import shutil
import string
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
from frigate.api.defs.tags import Tags
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.events])
@router.get("/faces")
def get_faces():
face_dict: dict[str, list[str]] = {}
for name in os.listdir(FACE_DIR):
face_dir = os.path.join(FACE_DIR, name)
if not os.path.isdir(face_dir):
continue
face_dict[name] = []
for file in sorted(
os.listdir(face_dir),
key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
reverse=True,
):
face_dict[name].append(file)
return JSONResponse(status_code=200, content=face_dict)
@router.post("/faces/{name}")
async def register_face(request: Request, name: str, file: UploadFile):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
context: EmbeddingsContext = request.app.embeddings
result = context.register_face(name, await file.read())
return JSONResponse(
status_code=200 if result.get("success", True) else 400,
content=result,
)
@router.post("/faces/train/{name}/classify")
def train_face(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, any] = body or {}
training_file = os.path.join(
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
)
if not training_file or not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file}",
}
),
status_code=404,
)
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
new_name = f"{name}-{rand_id}.webp"
new_file = os.path.join(FACE_DIR, f"{name}/{new_name}")
shutil.move(training_file, new_file)
context: EmbeddingsContext = request.app.embeddings
context.clear_face_classifier()
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully saved {training_file} as {new_name}.",
}
),
status_code=200,
)
@router.post("/faces/{name}/delete")
def deregister_faces(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
content=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
context: EmbeddingsContext = request.app.embeddings
context.delete_face_ids(
name, map(lambda file: sanitize_filename(file), list_of_ids)
)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)
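
A short usage sketch for the face library routes above, using the requests library; the base URL ("/api" prefix) and the file names are placeholders, not taken from this diff.

import requests

BASE = "http://127.0.0.1:5000/api"  # assumed base URL

# Register a face image for "jane"; the route expects a multipart upload named "file".
with open("jane.jpg", "rb") as f:
    print(requests.post(f"{BASE}/faces/jane", files={"file": f}).json())

# List known faces and their stored image files, newest first.
print(requests.get(f"{BASE}/faces").json())

# Delete specific stored face images for "jane" (ids are file names).
print(
    requests.post(f"{BASE}/faces/jane/delete", json={"ids": ["jane-abc123.webp"]}).json()
)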

View File

@@ -20,7 +20,6 @@ class MediaLatestFrameQueryParams(BaseModel):
regions: Optional[int] = None
quality: Optional[int] = 70
height: Optional[int] = None
store: Optional[int] = None
class MediaEventsSnapshotQueryParams(BaseModel):

View File

@@ -8,9 +8,6 @@ class EventsSubLabelBody(BaseModel):
subLabelScore: Optional[float] = Field(
title="Score for sub label", default=None, gt=0.0, le=1.0
)
camera: Optional[str] = Field(
title="Camera this object is detected on.", default=None
)
class EventsDescriptionBody(BaseModel):

View File

@@ -10,5 +10,4 @@ class Tags(Enum):
review = "Review"
export = "Export"
events = "Events"
classification = "classification"
auth = "Auth"

View File

@@ -909,59 +909,38 @@ def set_sub_label(
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
if not body.camera:
return JSONResponse(
content=(
{
"success": False,
"message": "Event "
+ event_id
+ " not found and camera is not provided.",
}
),
status_code=404,
)
event = None
if request.app.detected_frames_processor:
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera if event else body.camera
].tracked_objects.get(event_id)
)
else:
tracked_obj = None
if not event and not tracked_obj:
return JSONResponse(
content=(
{"success": False, "message": "Event " + event_id + " not found."}
),
content=({"success": False, "message": "Event " + event_id + " not found"}),
status_code=404,
)
new_sub_label = body.subLabel
new_score = body.subLabelScore
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
if not event.end_time:
# update tracked object
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera
].tracked_objects.get(event.id)
)
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
).where(Timeline.source_id == event_id).execute()
if event:
event.sub_label = new_sub_label
event.sub_label = new_sub_label
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
event.save()
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
event.save()
return JSONResponse(
content=(
{

View File

@@ -11,16 +11,7 @@ from starlette_context import middleware, plugins
from starlette_context.plugins import Plugin
from frigate.api import app as main_app
from frigate.api import (
auth,
classification,
event,
export,
media,
notification,
preview,
review,
)
from frigate.api import auth, event, export, media, notification, preview, review
from frigate.api.auth import get_jwt_secret, limiter
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
@@ -108,7 +99,6 @@ def create_fastapi_app(
# Routes
# Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
app.include_router(auth.router)
app.include_router(classification.router)
app.include_router(review.router)
app.include_router(main_app.router)
app.include_router(preview.router)

View File

@@ -179,12 +179,7 @@ def latest_frame(
return Response(
content=img.tobytes(),
media_type=f"image/{extension}",
headers={
"Content-Type": f"image/{extension}",
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
)
elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
frame = cv2.cvtColor(
@@ -203,12 +198,7 @@ def latest_frame(
return Response(
content=img.tobytes(),
media_type=f"image/{extension}",
headers={
"Content-Type": f"image/{extension}",
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
)
else:
return JSONResponse(

View File

@@ -34,12 +34,10 @@ from frigate.const import (
CLIPS_DIR,
CONFIG_DIR,
EXPORT_DIR,
FACE_DIR,
MODEL_CACHE_DIR,
RECORD_DIR,
SHM_FRAMES_VAR,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.events.audio import AudioProcessor
@@ -90,9 +88,6 @@ class FrigateApp:
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.camera_metrics: dict[str, CameraMetrics] = {}
self.embeddings_metrics: DataProcessorMetrics | None = (
DataProcessorMetrics() if config.semantic_search.enabled else None
)
self.ptz_metrics: dict[str, PTZMetrics] = {}
self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None
@@ -101,19 +96,14 @@ class FrigateApp:
self.config = config
def ensure_dirs(self) -> None:
dirs = [
for d in [
CONFIG_DIR,
RECORD_DIR,
f"{CLIPS_DIR}/cache",
CACHE_DIR,
MODEL_CACHE_DIR,
EXPORT_DIR,
]
if self.config.face_recognition.enabled:
dirs.append(FACE_DIR)
for d in dirs:
]:
if not os.path.exists(d) and not os.path.islink(d):
logger.info(f"Creating directory: {d}")
os.makedirs(d)
@@ -239,10 +229,7 @@ class FrigateApp:
embedding_process = util.Process(
target=manage_embeddings,
name="embeddings_manager",
args=(
self.config,
self.embeddings_metrics,
),
args=(self.config,),
)
embedding_process.daemon = True
self.embedding_process = embedding_process
@@ -450,7 +437,7 @@ class FrigateApp:
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
self.frame_manager.create(f"{config.name}_{i}", frame_size)
capture_process = util.Process(
target=capture_camera,
@@ -504,11 +491,7 @@ class FrigateApp:
self.stats_emitter = StatsEmitter(
self.config,
stats_init(
self.config,
self.camera_metrics,
self.embeddings_metrics,
self.detectors,
self.processes,
self.config, self.camera_metrics, self.detectors, self.processes
),
self.stop_event,
)

View File

@@ -1,130 +0,0 @@
"""Manage camera activity and updating listeners."""
from collections import Counter
from typing import Callable
from frigate.config.config import FrigateConfig
class CameraActivityManager:
def __init__(
self, config: FrigateConfig, publish: Callable[[str, any], None]
) -> None:
self.config = config
self.publish = publish
self.last_camera_activity: dict[str, dict[str, any]] = {}
self.camera_all_object_counts: dict[str, Counter] = {}
self.camera_active_object_counts: dict[str, Counter] = {}
self.zone_all_object_counts: dict[str, Counter] = {}
self.zone_active_object_counts: dict[str, Counter] = {}
self.all_zone_labels: dict[str, set[str]] = {}
for camera_config in config.cameras.values():
if not camera_config.enabled:
continue
self.last_camera_activity[camera_config.name] = {}
self.camera_all_object_counts[camera_config.name] = Counter()
self.camera_active_object_counts[camera_config.name] = Counter()
for zone, zone_config in camera_config.zones.items():
if zone not in self.all_zone_labels:
self.zone_all_object_counts[zone] = Counter()
self.zone_active_object_counts[zone] = Counter()
self.all_zone_labels[zone] = set()
self.all_zone_labels[zone].update(zone_config.objects)
def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
all_objects: list[dict[str, any]] = []
for camera in new_activity.keys():
new_objects = new_activity[camera].get("objects", [])
all_objects.extend(new_objects)
if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
self.compare_camera_activity(camera, new_objects)
# run through every zone, getting a count of objects in that zone right now
for zone, labels in self.all_zone_labels.items():
all_zone_objects = Counter(
obj["label"].replace("-verified", "")
for obj in all_objects
if zone in obj["current_zones"]
)
active_zone_objects = Counter(
obj["label"].replace("-verified", "")
for obj in all_objects
if zone in obj["current_zones"] and not obj["stationary"]
)
any_changed = False
# run through each object and check what topics need to be updated for this zone
for label in labels:
new_count = all_zone_objects[label]
new_active_count = active_zone_objects[label]
if (
new_count != self.zone_all_object_counts[zone][label]
or label not in self.zone_all_object_counts[zone]
):
any_changed = True
self.publish(f"{zone}/{label}", new_count)
self.zone_all_object_counts[zone][label] = new_count
if (
new_active_count != self.zone_active_object_counts[zone][label]
or label not in self.zone_active_object_counts[zone]
):
any_changed = True
self.publish(f"{zone}/{label}/active", new_active_count)
self.zone_active_object_counts[zone][label] = new_active_count
if any_changed:
self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
self.publish(
f"{zone}/all/active", sum(list(active_zone_objects.values()))
)
self.last_camera_activity = new_activity
def compare_camera_activity(
self, camera: str, new_activity: dict[str, any]
) -> None:
all_objects = Counter(
obj["label"].replace("-verified", "") for obj in new_activity
)
active_objects = Counter(
obj["label"].replace("-verified", "")
for obj in new_activity
if not obj["stationary"]
)
any_changed = False
# run through each object and check what topics need to be updated
for label in self.config.cameras[camera].objects.track:
if label in self.config.model.all_attributes:
continue
new_count = all_objects[label]
new_active_count = active_objects[label]
if (
new_count != self.camera_all_object_counts[camera][label]
or label not in self.camera_all_object_counts[camera]
):
any_changed = True
self.publish(f"{camera}/{label}", new_count)
self.camera_all_object_counts[camera][label] = new_count
if (
new_active_count != self.camera_active_object_counts[camera][label]
or label not in self.camera_active_object_counts[camera]
):
any_changed = True
self.publish(f"{camera}/{label}/active", new_active_count)
self.camera_active_object_counts[camera][label] = new_active_count
if any_changed:
self.publish(f"{camera}/all", sum(list(all_objects.values())))
self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
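
A rough usage sketch for the CameraActivityManager above, with a print callback standing in for the real publisher; the config path, camera name, zone, and payload shape are illustrative assumptions (the camera and zone must exist in the loaded config for counts to be published).

from frigate.camera.activity_manager import CameraActivityManager
from frigate.config.config import FrigateConfig

def publish(topic: str, payload) -> None:
    # Normally forwards to MQTT/WebSocket subscribers.
    print(f"{topic} = {payload}")

config = FrigateConfig.parse_yaml(open("/config/config.yml").read())
manager = CameraActivityManager(config, publish)

# Simplified activity payload; real payloads come from the tracking pipeline.
manager.update_activity(
    {
        "front_yard": {
            "objects": [
                {"label": "person", "current_zones": ["porch"], "stationary": False}
            ]
        }
    }
)
# Expected topics for a tracked "person": front_yard/person, front_yard/person/active,
# front_yard/all, front_yard/all/active, plus porch/* if the zone tracks "person".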

View File

@@ -7,7 +7,6 @@ from abc import ABC, abstractmethod
from typing import Any, Callable, Optional
from frigate.camera import PTZMetrics
from frigate.camera.activity_manager import CameraActivityManager
from frigate.comms.config_updater import ConfigPublisher
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import (
@@ -65,7 +64,7 @@ class Dispatcher:
self.onvif = onvif
self.ptz_metrics = ptz_metrics
self.comms = communicators
self.camera_activity = CameraActivityManager(config, self.publish)
self.camera_activity = {}
self.model_state = {}
self.embeddings_reindex = {}
@@ -131,7 +130,7 @@ class Dispatcher:
).execute()
def handle_update_camera_activity():
self.camera_activity.update_activity(payload)
self.camera_activity = payload
def handle_update_event_description():
event: Event = Event.get(Event.id == payload["id"])
@@ -172,7 +171,7 @@ class Dispatcher:
)
def handle_on_connect():
camera_status = self.camera_activity.last_camera_activity.copy()
camera_status = self.camera_activity.copy()
for camera in camera_status.keys():
camera_status[camera]["config"] = {

View File

@@ -9,11 +9,9 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"
class EmbeddingsRequestEnum(Enum):
clear_face_classifier = "clear_face_classifier"
embed_description = "embed_description"
embed_thumbnail = "embed_thumbnail"
generate_search = "generate_search"
register_face = "register_face"
class EmbeddingsResponder:
@@ -24,7 +22,7 @@ class EmbeddingsResponder:
def check_for_request(self, process: Callable) -> None:
while True: # load all messages that are queued
has_message, _, _ = zmq.select([self.socket], [], [], 0.01)
has_message, _, _ = zmq.select([self.socket], [], [], 0.1)
if not has_message:
break

View File

@@ -151,7 +151,7 @@ class WebPushClient(Communicator): # type: ignore[misc]
camera: str = payload["after"]["camera"]
title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
message = f"Detected on {camera.replace('_', ' ').title()}"
image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"
image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
# if event is ongoing open to live view otherwise open to recordings view
direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"
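
A toy illustration of the notification strings built above; all payload values are invented.

sorted_objects = ["person"]
state = "end"
zones = ["front_door"]
camera = "back_yard"

title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(zones).replace('_', ' ').title()}"
message = f"Detected on {camera.replace('_', ' ').title()}"
print(title)    # Person was detected in Front Door
print(message)  # Detected on Back Yard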

View File

@@ -3,13 +3,13 @@ from frigate.detectors import DetectorConfig, ModelConfig # noqa: F401
from .auth import * # noqa: F403
from .camera import * # noqa: F403
from .camera_group import * # noqa: F403
from .classification import * # noqa: F403
from .config import * # noqa: F403
from .database import * # noqa: F403
from .logger import * # noqa: F403
from .mqtt import * # noqa: F403
from .notification import * # noqa: F403
from .proxy import * # noqa: F403
from .semantic_search import * # noqa: F403
from .telemetry import * # noqa: F403
from .tls import * # noqa: F403
from .ui import * # noqa: F403

View File

@@ -167,7 +167,7 @@ class CameraConfig(FrigateBaseModel):
record_args = get_ffmpeg_arg_list(
parse_preset_output_record(
self.ffmpeg.output_args.record,
self.ffmpeg.apple_compatibility,
self.ffmpeg.output_args._force_record_hvc1,
)
or self.ffmpeg.output_args.record
)

View File

@@ -2,7 +2,7 @@ import shutil
from enum import Enum
from typing import Union
from pydantic import Field, field_validator
from pydantic import Field, PrivateAttr, field_validator
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
@@ -42,6 +42,7 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
)
_force_record_hvc1: bool = PrivateAttr(default=False)
class FfmpegConfig(FrigateBaseModel):
@@ -63,10 +64,6 @@ class FfmpegConfig(FrigateBaseModel):
default=10.0,
title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
)
apple_compatibility: bool = Field(
default=False,
title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
)
@property
def ffmpeg_path(self) -> str:

View File

@@ -38,10 +38,6 @@ class GenAICameraConfig(BaseModel):
default_factory=list,
title="List of required zones to be entered in order to run generative AI.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
)
@field_validator("required_zones", mode="before")
@classmethod

View File

@@ -1,6 +1,6 @@
from typing import Any, Optional, Union
from pydantic import Field, PrivateAttr, field_serializer
from pydantic import Field, field_serializer
from ..base import FrigateBaseModel
@@ -53,20 +53,3 @@ class ObjectConfig(FrigateBaseModel):
default_factory=dict, title="Object filters."
)
mask: Union[str, list[str]] = Field(default="", title="Object mask.")
_all_objects: list[str] = PrivateAttr()
@property
def all_objects(self) -> list[str]:
return self._all_objects
def parse_all_objects(self, cameras):
if "_all_objects" in self:
return
# get list of unique enabled labels for tracking
enabled_labels = set(self.track)
for camera in cameras.values():
enabled_labels.update(camera.objects.track)
self._all_objects = list(enabled_labels)

View File

@@ -74,7 +74,6 @@ class OnvifConfig(FrigateBaseModel):
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",

View File

@@ -4,7 +4,6 @@ from typing import Optional
from pydantic import Field
from frigate.const import MAX_PRE_CAPTURE
from frigate.review.types import SeverityEnum
from ..base import FrigateBaseModel
@@ -102,15 +101,3 @@ class RecordConfig(FrigateBaseModel):
self.alerts.pre_capture,
self.detections.pre_capture,
)
def get_review_pre_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.pre_capture
else:
return self.detections.pre_capture
def get_review_post_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.post_capture
else:
return self.detections.post_capture

View File

@@ -85,7 +85,7 @@ class ZoneConfig(BaseModel):
if explicit:
self.coordinates = ",".join(
[
f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
f'{round(int(p.split(",")[0]) / frame_shape[1], 3)},{round(int(p.split(",")[1]) / frame_shape[0], 3)}'
for p in coordinates
]
)
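
A small worked example of the relative-coordinate conversion above; the frame size and pixel points are made up for illustration.

# For a 1280x720 frame, frame_shape is (height, width) = (720, 1280).
frame_shape = (720, 1280)
coordinates = ["100,200", "640,360"]
normalized = ",".join(
    f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
    for p in coordinates
)
print(normalized)  # 0.078,0.278,0.5,0.5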

View File

@@ -1,74 +0,0 @@
from typing import Dict, List, Optional
from pydantic import Field
from .base import FrigateBaseModel
__all__ = [
"FaceRecognitionConfig",
"SemanticSearchConfig",
"LicensePlateRecognitionConfig",
]
class BirdClassificationConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable bird classification.")
threshold: float = Field(
default=0.9,
title="Minimum classification score required to be considered a match.",
gt=0.0,
le=1.0,
)
class ClassificationConfig(FrigateBaseModel):
bird: BirdClassificationConfig = Field(
default_factory=BirdClassificationConfig, title="Bird classification config."
)
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
min_score: float = Field(
title="Minimum face distance score required to save the attempt.",
default=0.8,
gt=0.0,
le=1.0,
)
threshold: float = Field(
default=0.9,
title="Minimum face distance score required to be considered a match.",
gt=0.0,
le=1.0,
)
min_area: int = Field(
default=500, title="Min area of face box to consider running face recognition."
)
save_attempts: bool = Field(
default=True, title="Save images of face detections for training."
)
class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
threshold: float = Field(
default=0.9,
title="License plate confidence score required to be added to the object as a sub label.",
)
min_area: int = Field(
default=500,
title="Min area of license plate to consider running license plate recognition.",
)
known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, title="Known plates to track."
)
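
A quick sketch of how the models above resolve their defaults; the import path and the example values are assumptions, and nothing here is read from a real config.

from frigate.config.classification import (
    FaceRecognitionConfig,
    LicensePlateRecognitionConfig,
)

face = FaceRecognitionConfig()
print(face.enabled, face.min_score, face.threshold, face.min_area)  # False 0.8 0.9 500

lpr = LicensePlateRecognitionConfig(enabled=True, known_plates={"my_car": ["ABC123"]})
print(lpr.threshold, lpr.known_plates)  # 0.9 {'my_car': ['ABC123']}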

View File

@@ -51,18 +51,13 @@ from .camera.review import ReviewConfig
from .camera.snapshots import SnapshotsConfig
from .camera.timestamp import TimestampStyleConfig
from .camera_group import CameraGroupConfig
from .classification import (
ClassificationConfig,
FaceRecognitionConfig,
LicensePlateRecognitionConfig,
SemanticSearchConfig,
)
from .database import DatabaseConfig
from .env import EnvVars
from .logger import LoggerConfig
from .mqtt import MqttConfig
from .notification import NotificationConfig
from .proxy import ProxyConfig
from .semantic_search import SemanticSearchConfig
from .telemetry import TelemetryConfig
from .tls import TlsConfig
from .ui import UIConfig
@@ -164,16 +159,6 @@ class RestreamConfig(BaseModel):
model_config = ConfigDict(extra="allow")
def verify_semantic_search_dependent_configs(config: FrigateConfig) -> None:
"""Verify that semantic search is enabled if required features are enabled."""
if not config.semantic_search.enabled:
if config.genai.enabled:
raise ValueError("Genai requires semantic search to be enabled.")
if config.face_recognition.enabled:
raise ValueError("Face recognition requires semantic to be enabled.")
def verify_config_roles(camera_config: CameraConfig) -> None:
"""Verify that roles are setup in the config correctly."""
assigned_roles = list(
@@ -332,19 +317,9 @@ class FrigateConfig(FrigateBaseModel):
default_factory=TelemetryConfig, title="Telemetry configuration."
)
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
@@ -462,12 +437,13 @@ class FrigateConfig(FrigateBaseModel):
camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
for input in camera_config.ffmpeg.inputs:
need_record_fourcc = False and "record" in input.roles
need_detect_dimensions = "detect" in input.roles and (
camera_config.detect.height is None
or camera_config.detect.width is None
)
if need_detect_dimensions:
if need_detect_dimensions or need_record_fourcc:
stream_info = {"width": 0, "height": 0, "fourcc": None}
try:
stream_info = stream_info_retriever.get_stream_info(
@@ -491,6 +467,14 @@ class FrigateConfig(FrigateBaseModel):
else DEFAULT_DETECT_DIMENSIONS["height"]
)
if need_record_fourcc:
# Apple only supports HEVC if it is hvc1 (vs. hev1)
camera_config.ffmpeg.output_args._force_record_hvc1 = (
stream_info["fourcc"] == "hevc"
if stream_info.get("hevc")
else False
)
# Warn if detect fps > 10
if camera_config.detect.fps > 10:
logger.warning(
@@ -594,8 +578,13 @@ class FrigateConfig(FrigateBaseModel):
verify_autotrack_zones(camera_config)
verify_motion_and_detect(camera_config)
self.objects.parse_all_objects(self.cameras)
self.model.create_colormap(sorted(self.objects.all_objects))
# get list of unique enabled labels for tracking
enabled_labels = set(self.objects.track)
for camera in self.cameras.values():
enabled_labels.update(camera.objects.track)
self.model.create_colormap(sorted(enabled_labels))
self.model.check_and_load_plus_model(self.plus_api)
for key, detector in self.detectors.items():
@@ -605,30 +594,37 @@ class FrigateConfig(FrigateBaseModel):
if isinstance(detector, dict)
else detector.model_dump(warnings="none")
)
detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
detector_config: DetectorConfig = adapter.validate_python(model_dict)
if detector_config.model is None:
detector_config.model = self.model.model_copy()
else:
path = detector_config.model.path
detector_config.model = self.model.model_copy()
detector_config.model.path = path
# users should not set model themselves
if detector_config.model:
detector_config.model = None
if "path" not in model_dict or len(model_dict.keys()) > 1:
logger.warning(
"Customizing more than a detector model path is unsupported."
)
model_config = self.model.model_dump(exclude_unset=True, warnings="none")
merged_model = deep_merge(
detector_config.model.model_dump(exclude_unset=True, warnings="none"),
self.model.model_dump(exclude_unset=True, warnings="none"),
)
if detector_config.model_path:
model_config["path"] = detector_config.model_path
if "path" not in model_config:
if "path" not in merged_model:
if detector_config.type == "cpu":
model_config["path"] = "/cpu_model.tflite"
merged_model["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
model_config["path"] = "/edgetpu_model.tflite"
merged_model["path"] = "/edgetpu_model.tflite"
model = ModelConfig.model_validate(model_config)
model.check_and_load_plus_model(self.plus_api, detector_config.type)
model.compute_model_hash()
detector_config.model = model
detector_config.model = ModelConfig.model_validate(merged_model)
detector_config.model.check_and_load_plus_model(
self.plus_api, detector_config.type
)
detector_config.model.compute_model_hash()
self.detectors[key] = detector_config
verify_semantic_search_dependent_configs(self)
return self
@field_validator("cameras")

View File

@@ -29,7 +29,6 @@ class LoggerConfig(FrigateBaseModel):
logging.getLogger().setLevel(self.default.value.upper())
log_levels = {
"httpx": LogLevel.error,
"werkzeug": LogLevel.error,
"ws4py": LogLevel.error,
**self.logs,

View File

@@ -0,0 +1,17 @@
from typing import Optional
from pydantic import Field
from .base import FrigateBaseModel
__all__ = ["SemanticSearchConfig"]
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)

View File

@@ -11,9 +11,6 @@ class StatsConfig(FrigateBaseModel):
network_bandwidth: bool = Field(
default=False, title="Enable network bandwidth for ffmpeg processes."
)
sriov: bool = Field(
default=False, title="Treat device as SR-IOV to support GPU stats."
)
class TelemetryConfig(FrigateBaseModel):

View File

@@ -5,9 +5,8 @@ DEFAULT_DB_PATH = f"{CONFIG_DIR}/frigate.db"
MODEL_CACHE_DIR = f"{CONFIG_DIR}/model_cache"
BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
EXPORT_DIR = f"{BASE_DIR}/exports"
FACE_DIR = f"{CLIPS_DIR}/faces"
RECORD_DIR = f"{BASE_DIR}/recordings"
EXPORT_DIR = f"{BASE_DIR}/exports"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
@@ -65,7 +64,6 @@ INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]
# Regex constants

View File

@@ -1,43 +0,0 @@
"""Local or remote processors to handle post processing."""
import logging
from abc import ABC, abstractmethod
from frigate.config import FrigateConfig
from ..types import DataProcessorMetrics, PostProcessDataEnum
logger = logging.getLogger(__name__)
class PostProcessorApi(ABC):
@abstractmethod
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
self.config = config
self.metrics = metrics
pass
@abstractmethod
def process_data(
self, data: dict[str, any], data_type: PostProcessDataEnum
) -> None:
"""Processes the data of data type.
Args:
data (dict): containing data about the input.
data_type (enum): Describing the data that is being processed.
Returns:
None.
"""
pass
@abstractmethod
def handle_request(self, request_data: dict[str, any]) -> dict[str, any] | None:
"""Handle metadata requests.
Args:
request_data (dict): containing data about requested change to process.
Returns:
None if request was not handled, otherwise return response.
"""
pass

View File

@@ -1,57 +0,0 @@
"""Local only processors for handling real time object processing."""
import logging
from abc import ABC, abstractmethod
import numpy as np
from frigate.config import FrigateConfig
from ..types import DataProcessorMetrics
logger = logging.getLogger(__name__)
class RealTimeProcessorApi(ABC):
@abstractmethod
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
self.config = config
self.metrics = metrics
pass
@abstractmethod
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
"""Processes the frame with object data.
Args:
obj_data (dict): containing data about focused object in frame.
frame (ndarray): full yuv frame.
Returns:
None.
"""
pass
@abstractmethod
def handle_request(
self, topic: str, request_data: dict[str, any]
) -> dict[str, any] | None:
"""Handle metadata requests.
Args:
topic (str): topic that dictates what work is requested.
request_data (dict): containing data about requested change to process.
Returns:
None if request was not handled, otherwise return response.
"""
pass
@abstractmethod
def expire_object(self, object_id: str) -> None:
"""Handle objects that are no longer detected.
Args:
object_id (str): id of object that is no longer detected.
Returns:
None.
"""
pass

View File

@@ -1,154 +0,0 @@
"""Handle processing images to classify birds."""
import logging
import os
import cv2
import numpy as np
import requests
from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.object import calculate_region
from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
class BirdProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
super().__init__(config, metrics)
self.interpreter: Interpreter = None
self.tensor_input_details: dict[str, any] = None
self.tensor_output_details: dict[str, any] = None
self.detected_birds: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
download_path = os.path.join(MODEL_CACHE_DIR, "bird")
self.model_files = {
"bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
"birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt",
}
if not all(
os.path.exists(os.path.join(download_path, n))
for n in self.model_files.keys()
):
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
self.downloader = ModelDownloader(
model_name="bird",
download_path=download_path,
file_names=self.model_files.keys(),
download_func=self.__download_models,
complete_func=self.__build_detector,
)
self.downloader.ensure_model_files()
else:
self.__build_detector()
def __download_models(self, path: str) -> None:
try:
file_name = os.path.basename(path)
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
ModelDownloader.download_from_url(self.model_files[file_name], path)
except Exception as e:
logger.error(f"Failed to download {path}: {e}")
def __build_detector(self) -> None:
self.interpreter = Interpreter(
model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
num_threads=2,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
i = 0
with open(os.path.join(MODEL_CACHE_DIR, "bird/birdmap.txt")) as f:
line = f.readline()
while line:
start = line.find("(")
end = line.find(")")
self.labelmap[i] = line[start + 1 : end]
i += 1
line = f.readline()
def process_frame(self, obj_data, frame):
if obj_data["label"] != "bird":
return
x, y, x2, y2 = calculate_region(
frame.shape,
obj_data["box"][0],
obj_data["box"][1],
obj_data["box"][2],
obj_data["box"][3],
224,
1.0,
)
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
input = rgb[
y:y2,
x:x2,
]
cv2.imwrite("/media/frigate/test_class.png", input)
input = np.expand_dims(input, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke()
res: np.ndarray = self.interpreter.get_tensor(
self.tensor_output_details[0]["index"]
)[0]
probs = res / res.sum(axis=0)
best_id = np.argmax(probs)
if best_id == 964:
logger.debug("No bird classification was detected.")
return
score = round(probs[best_id], 2)
if score < self.config.classification.bird.threshold:
logger.debug(f"Score {score} is not above required threshold")
return
previous_score = self.detected_birds.get(obj_data["id"], 0.0)
if score <= previous_score:
logger.debug(f"Score {score} is worse than previous score {previous_score}")
return
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{obj_data['id']}/sub_label",
json={
"camera": obj_data.get("camera"),
"subLabel": self.labelmap[best_id],
"subLabelScore": score,
},
)
if resp.status_code == 200:
self.detected_birds[obj_data["id"]] = score
def handle_request(self, topic, request_data):
return None
def expire_object(self, object_id):
if object_id in self.detected_birds:
self.detected_birds.pop(object_id)
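
To make the scoring step above concrete, a toy example of the normalization and threshold check; the tensor values are invented.

import numpy as np

# Toy classifier output: raw scores for four labels.
res = np.array([10, 30, 200, 15], dtype=np.float32)

probs = res / res.sum(axis=0)    # normalize into a probability-like distribution
best_id = int(np.argmax(probs))  # index of the most likely label
score = round(float(probs[best_id]), 2)

print(best_id, score)  # 2 0.78
# With the default threshold of 0.9, a 0.78 classification would be discarded.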

View File

@@ -1,406 +0,0 @@
"""Handle processing images for face detection and recognition."""
import base64
import datetime
import logging
import os
import random
import string
from typing import Optional
import cv2
import numpy as np
import requests
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.image import area
from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi
logger = logging.getLogger(__name__)
MIN_MATCHING_FACES = 2
class FaceProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
super().__init__(config, metrics)
self.face_config = config.face_recognition
self.face_detector: cv2.FaceDetectorYN = None
self.landmark_detector: cv2.face.FacemarkLBF = None
self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
self.requires_face_detection = "face" not in self.config.objects.all_objects
self.detected_faces: dict[str, float] = {}
download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
self.model_files = {
"facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
"landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
}
if not all(
os.path.exists(os.path.join(download_path, n))
for n in self.model_files.keys()
):
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
self.downloader = ModelDownloader(
model_name="facedet",
download_path=download_path,
file_names=self.model_files.keys(),
download_func=self.__download_models,
complete_func=self.__build_detector,
)
self.downloader.ensure_model_files()
else:
self.__build_detector()
self.label_map: dict[int, str] = {}
self.__build_classifier()
def __download_models(self, path: str) -> None:
try:
file_name = os.path.basename(path)
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
ModelDownloader.download_from_url(self.model_files[file_name], path)
except Exception as e:
logger.error(f"Failed to download {path}: {e}")
def __build_detector(self) -> None:
self.face_detector = cv2.FaceDetectorYN.create(
"/config/model_cache/facedet/facedet.onnx",
config="",
input_size=(320, 320),
score_threshold=0.8,
nms_threshold=0.3,
)
self.landmark_detector = cv2.face.createFacemarkLBF()
self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
def __build_classifier(self) -> None:
if not self.landmark_detector:
return None
labels = []
faces = []
dir = "/media/frigate/clips/faces"
for idx, name in enumerate(os.listdir(dir)):
if name == "train":
continue
face_folder = os.path.join(dir, name)
if not os.path.isdir(face_folder):
continue
self.label_map[idx] = name
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
if img is None:
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
faces.append(img)
labels.append(idx)
self.recognizer: cv2.face.LBPHFaceRecognizer = (
cv2.face.LBPHFaceRecognizer_create(
radius=2, threshold=(1 - self.face_config.min_score) * 1000
)
)
self.recognizer.train(faces, np.array(labels))
def __align_face(
self,
image: np.ndarray,
output_width: int,
output_height: int,
) -> np.ndarray:
_, lands = self.landmark_detector.fit(
image, np.array([(0, 0, image.shape[1], image.shape[0])])
)
landmarks: np.ndarray = lands[0][0]
# get landmarks for eyes
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]
# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
desiredRightEyeX = 1.0 - 0.35
# determine the scale of the new resulting image by taking
# the ratio of the distance between eyes in the *current*
# image to the ratio of distance between eyes in the
# *desired* image
dist = np.sqrt((dX**2) + (dY**2))
desiredDist = desiredRightEyeX - 0.35
desiredDist *= output_width
scale = desiredDist / dist
# compute center (x, y)-coordinates (i.e., the median point)
# between the two eyes in the input image
# grab the rotation matrix for rotating and scaling the face
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
# update the translation component of the matrix
tX = output_width * 0.5
tY = output_height * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]
# apply the affine transformation
return cv2.warpAffine(
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
)
def __clear_classifier(self) -> None:
self.face_recognizer = None
self.label_map = {}
def __detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
"""Detect faces in input image."""
if not self.face_detector:
return None
self.face_detector.setInputSize((input.shape[1], input.shape[0]))
faces = self.face_detector.detect(input)
if faces is None or faces[1] is None:
return None
face = None
for _, potential_face in enumerate(faces[1]):
raw_bbox = potential_face[0:4].astype(np.uint16)
x: int = max(raw_bbox[0], 0)
y: int = max(raw_bbox[1], 0)
w: int = raw_bbox[2]
h: int = raw_bbox[3]
bbox = (x, y, x + w, y + h)
if face is None or area(bbox) > area(face):
face = bbox
return face
def __classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
if not self.landmark_detector:
return None
if not self.label_map:
self.__build_classifier()
img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
index, distance = self.recognizer.predict(img)
if index == -1:
return None
score = 1.0 - (distance / 1000)
return self.label_map[index], round(score, 2)
def __update_metrics(self, duration: float) -> None:
self.metrics.face_rec_fps.value = (
self.metrics.face_rec_fps.value * 9 + duration
) / 10
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
"""Look for faces in image."""
start = datetime.datetime.now().timestamp()
id = obj_data["id"]
# don't run for non person objects
if obj_data.get("label") != "person":
logger.debug("Not a processing face for non person object.")
return
# don't overwrite sub label for objects that have a sub label
# that is not a face
if obj_data.get("sub_label") and id not in self.detected_faces:
logger.debug(
f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
)
return
face: Optional[dict[str, any]] = None
if self.requires_face_detection:
logger.debug("Running manual face detection.")
person_box = obj_data.get("box")
if not person_box:
return
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
left, top, right, bottom = person_box
person = rgb[top:bottom, left:right]
face_box = self.__detect_face(person)
if not face_box:
logger.debug("Detected no faces for person object.")
return
face_frame = person[
max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
]
face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
else:
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
return
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
if attr.get("label") != "face":
continue
if face is None or attr.get("score", 0.0) > face.get("score", 0.0):
face = attr
# no faces detected in this frame
if not face:
return
face_box = face.get("box")
# check that face is valid
if not face_box or area(face_box) < self.config.face_recognition.min_area:
logger.debug(f"Invalid face box {face}")
return
face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
face_frame = face_frame[
max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
]
res = self.__classify_face(face_frame)
if not res:
return
sub_label, score = res
# calculate the overall face score as the probability * area of face
# this will help to reduce false positives from small side-angle faces
# if a large front-on face image may have scored slightly lower but
# is more likely to be accurate due to the larger face area
face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2)
logger.debug(
f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
)
if self.config.face_recognition.save_attempts:
# write face to library
folder = os.path.join(FACE_DIR, "train")
file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
os.makedirs(folder, exist_ok=True)
cv2.imwrite(file, face_frame)
if score < self.config.face_recognition.threshold:
logger.debug(
f"Recognized face distance {score} is less than threshold {self.config.face_recognition.threshold}"
)
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
if id in self.detected_faces and face_score <= self.detected_faces[id]:
logger.debug(
f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
)
self.__update_metrics(datetime.datetime.now().timestamp() - start)
return
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
json={
"camera": obj_data.get("camera"),
"subLabel": sub_label,
"subLabelScore": score,
},
)
if resp.status_code == 200:
self.detected_faces[id] = face_score
self.__update_metrics(datetime.datetime.now().timestamp() - start)
def handle_request(self, topic, request_data) -> dict[str, any] | None:
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
self.__clear_classifier()
elif topic == EmbeddingsRequestEnum.register_face.value:
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
label = request_data["face_name"]
id = f"{label}-{rand_id}"
if request_data.get("cropped"):
thumbnail = request_data["image"]
else:
img = cv2.imdecode(
np.frombuffer(
base64.b64decode(request_data["image"]), dtype=np.uint8
),
cv2.IMREAD_COLOR,
)
face_box = self.__detect_face(img)
if not face_box:
return {
"message": "No face was detected.",
"success": False,
}
face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
_, thumbnail = cv2.imencode(
".webp", face, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
)
# write face to library
folder = os.path.join(FACE_DIR, label)
file = os.path.join(folder, f"{id}.webp")
os.makedirs(folder, exist_ok=True)
# save face image
with open(file, "wb") as output:
output.write(thumbnail.tobytes())
self.__clear_classifier()
return {
"message": "Successfully registered face.",
"success": True,
}
def expire_object(self, object_id: str):
if object_id in self.detected_faces:
self.detected_faces.pop(object_id)
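
A toy illustration of the area-weighted face score used above; the crop sizes and probabilities are invented.

# Two hypothetical recognition attempts for the same tracked person:
# a small side-angle crop with a higher probability, and a larger frontal
# crop with a slightly lower probability.
small_crop = {"score": 0.92, "h": 60, "w": 50}
large_crop = {"score": 0.88, "h": 140, "w": 120}

for crop in (small_crop, large_crop):
    face_score = round(crop["score"] * crop["h"] * crop["w"], 2)
    print(face_score)  # 2760.0 then 14784.0

# The larger frontal face wins despite the marginally lower probability,
# which is the intent of weighting by face area.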

View File

@@ -1,24 +0,0 @@
"""Embeddings types."""
import multiprocessing as mp
from enum import Enum
from multiprocessing.sharedctypes import Synchronized
class DataProcessorMetrics:
image_embeddings_fps: Synchronized
text_embeddings_sps: Synchronized
face_rec_fps: Synchronized
alpr_pps: Synchronized
def __init__(self):
self.image_embeddings_fps = mp.Value("d", 0.01)
self.text_embeddings_sps = mp.Value("d", 0.01)
self.face_rec_fps = mp.Value("d", 0.01)
self.alpr_pps = mp.Value("d", 0.01)
class PostProcessDataEnum(str, Enum):
recording = "recording"
review = "review"
tracked_object = "tracked_object"

View File

@@ -194,9 +194,6 @@ class BaseDetectorConfig(BaseModel):
model: Optional[ModelConfig] = Field(
default=None, title="Detector specific model configuration."
)
model_path: Optional[str] = Field(
default=None, title="Detector specific model path."
)
model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
)

View File

@@ -108,7 +108,7 @@ class Rknn(DetectionApi):
model_props["model_type"] = model_type
if model_matched:
model_props["filename"] = model_path + f"-{soc}-v2.3.0-1.rknn"
model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
model_props["path"] = model_cache_dir + model_props["filename"]
@@ -129,7 +129,7 @@ class Rknn(DetectionApi):
os.mkdir(model_cache_dir)
urllib.request.urlretrieve(
f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.0/{filename}",
f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
model_cache_dir + filename,
)

View File

@@ -219,19 +219,19 @@ class TensorRtDetector(DetectionApi):
]
def __init__(self, detector_config: TensorRTDetectorConfig):
assert TRT_SUPPORT, (
f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
)
assert (
TRT_SUPPORT
), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
(cuda_err,) = cuda.cuInit(0)
assert cuda_err == cuda.CUresult.CUDA_SUCCESS, (
f"Failed to initialize cuda {cuda_err}"
)
assert (
cuda_err == cuda.CUresult.CUDA_SUCCESS
), f"Failed to initialize cuda {cuda_err}"
err, dev_count = cuda.cuDeviceGetCount()
logger.debug(f"Num Available Devices: {dev_count}")
assert detector_config.device < dev_count, (
f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
)
assert (
detector_config.device < dev_count
), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
err, self.cu_ctx = cuda.cuCtxCreate(
cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
)

View File

@@ -1,6 +1,5 @@
"""SQLite-vec embeddings database."""
import base64
import json
import logging
import multiprocessing as mp
@@ -14,8 +13,7 @@ from setproctitle import setproctitle
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
from frigate.config import FrigateConfig
from frigate.const import CONFIG_DIR, FACE_DIR
from frigate.data_processing.types import DataProcessorMetrics
from frigate.const import CONFIG_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.util.builtin import serialize
@@ -27,7 +25,7 @@ from .util import ZScoreNormalization
logger = logging.getLogger(__name__)
def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
def manage_embeddings(config: FrigateConfig) -> None:
# Only initialize embeddings if semantic search is enabled
if not config.semantic_search.enabled:
return
@@ -61,7 +59,6 @@ def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> N
maintainer = EmbeddingMaintainer(
db,
config,
metrics,
stop_event,
)
maintainer.start()
@@ -192,38 +189,6 @@ class EmbeddingsContext:
return results
def register_face(self, face_name: str, image_data: bytes) -> dict[str, any]:
return self.requestor.send_data(
EmbeddingsRequestEnum.register_face.value,
{
"face_name": face_name,
"image": base64.b64encode(image_data).decode("ASCII"),
},
)
def get_face_ids(self, name: str) -> list[str]:
sql_query = f"""
SELECT
id
FROM vec_descriptions
WHERE id LIKE '%{name}%'
"""
return self.db.execute_sql(sql_query).fetchall()
def clear_face_classifier(self) -> None:
self.requestor.send_data(
EmbeddingsRequestEnum.clear_face_classifier.value, None
)
def delete_face_ids(self, face: str, ids: list[str]) -> None:
folder = os.path.join(FACE_DIR, face)
for id in ids:
file_path = os.path.join(folder, id)
if os.path.isfile(file_path):
os.unlink(file_path)
def update_description(self, event_id: str, description: str) -> None:
self.requestor.send_data(
EmbeddingsRequestEnum.embed_description.value,

Some files were not shown because too many files have changed in this diff.