Compare commits

...

76 Commits

Author SHA1 Message Date
Nicolas Mowen
3d1ebdcbd5 Face recognition improvements (#16034) 2025-01-18 11:52:01 -06:00
Nicolas Mowen
91c6618cab Bird classification (#15966)
* Start working on bird processor

* Initial setup for bird processing

* Improvements to handling

* Get classification working

* Cleanup classification

* Add classification config

* Update sort
2025-01-13 09:09:04 -06:00
Nicolas Mowen
65d1bb6449 Update hailo deps (#15958) 2025-01-12 17:53:30 -06:00
Nicolas Mowen
1ffd0d3897 Processing refactor (#15935)
* Refactor post processor to be real time processor

* Build out generic API for post processing

* Cleanup

* Fix
2025-01-11 07:53:05 -07:00
Nicolas Mowen
2139d621b5 Generalize postprocessing (#15931)
* Actually send result to face registration

* Define postprocessing api and move face processing to fit

* Standardize request handling

* Standardize handling of processors

* Rename processing metrics

* Cleanup

* Standardize object end

* Update to newer formatting

* One more

* One more
2025-01-11 07:53:05 -07:00
Nicolas Mowen
d326846790 Fix onvif packages (#15906)
* Don't replace packages

* Formatting
2025-01-11 07:53:05 -07:00
Josh Hawkins
1a2ef37d95 Only print line and key/value when a line number can be found (#15897) 2025-01-11 07:53:05 -07:00
Nicolas Mowen
384487af41 Upgrade onvif-zeep dependency to use onvif-zeep-async (#15894)
* Upgrade to new dependency

* Start onvif work

* Update for async calls
2025-01-11 07:53:05 -07:00
Nicolas Mowen
7bc0acc2f7 Improvements to face recognition (#15854)
* Do not add margin to face images

* remove margin

* Correctly clear
2025-01-11 07:53:05 -07:00
Nicolas Mowen
fcb4a094e5 Add metrics page for embeddings and face / license plate processing times (#15818)
* Get stats for embeddings inferences

* cleanup embeddings inferences

* Enable UI for feature metrics

* Change threshold

* Fix check

* Update python for actions

* Set python version

* Ignore type for now
2025-01-11 07:53:05 -07:00
Nicolas Mowen
957613b2af Fix facedet download (#15811)
* Support downloading face models

* Handle download and loading correctly

* Add face dir creation

* Fix error

* Fix

* Formatting

* Move upload to button

* Show number of faces in library for each name

* Add text color for score

* Cleanup
2025-01-11 07:53:05 -07:00
Nicolas Mowen
5fa0bfe6d3 Refactor camera activity processing (#15803)
* Replace object label sensors with new manager

* Implement zone topics

* remove unused
2025-01-11 07:53:05 -07:00
Marc Altmann
e8d27d1f91 rockchip: update dependencies and add script for model conversion (#15699)
* rockchip: update dependencies and add script for model conversion

* rockchip: update docs

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-01-11 07:53:05 -07:00
Nicolas Mowen
39ecffbf92 Add support for SR-IOV GPU stats (#15796)
* Add option to treat GPU as SRIOV in order for stats to work correctly

* Add to intel docs

* fix tests
2025-01-11 07:53:05 -07:00
Nicolas Mowen
d5fb957503 Add ffmpeg config to increase HEVC compatibility with Apple devices (#15795)
* Add config option for handling HEVC playback on Apple devices

* Update docs

* Remove unused
2025-01-11 07:53:05 -07:00
Nicolas Mowen
ee81b5623e Implement face recognition training in UI (#15786)
* Rename debug to train

* Add api to train image as person

* Cleanup model running

* Formatting

* Fix

* Set face recognition page title
2025-01-11 07:53:05 -07:00
Nicolas Mowen
e010206efe Add UI for managing face recognitions (#15757)
* Add ability to view attempts

* Improve UI

* Cleanup

* Correctly refresh ui when item is deleted

* Select correct library by default

* Add min score

* Cleanup
2025-01-11 07:53:05 -07:00
Nicolas Mowen
052634d020 Face recognition logic improvements (#15679)
* Always initialize face model on startup

* Add ability to save face images for debugging

* Implement better face recognition reasonability
2025-01-11 07:53:05 -07:00
Nicolas Mowen
44d9c3a654 Change folder 2025-01-11 07:53:05 -07:00
Nicolas Mowen
59088fd10f Set model size 2025-01-11 07:53:05 -07:00
Nicolas Mowen
c6ef5160c1 Improve face recognition (#15670)
* Face recognition tuning

* Support face alignment

* Cleanup

* Correctly download model
2025-01-11 07:53:05 -07:00
Nicolas Mowen
11d8b304a3 Update TRT (#15646) 2025-01-11 07:53:05 -07:00
Nicolas Mowen
52c1c9c327 Make face library scrollable 2025-01-11 07:53:05 -07:00
Nicolas Mowen
d91602acee Update openvino (#15634) 2025-01-11 07:53:05 -07:00
Nicolas Mowen
2b12117df5 Update python deps (#15618)
* Update opencv

* Update cython

* Update scikit

* Update scipy
2025-01-11 07:53:05 -07:00
Nicolas Mowen
4fb2f89ac8 Enable temporary caching of camera images to improve responsiveness of UI (#15614) 2025-01-11 07:53:05 -07:00
Josh Hawkins
c97457d22a Preserve line numbers in config validation (#15584)
* use ruamel to parse and preserve line numbers for config validation

* maintain exception for non validation errors

* fix types

* include input in log messages
2025-01-11 07:53:05 -07:00
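
For context, ruamel.yaml's round-trip loader keeps line and column information on parsed mappings, which is presumably what lets the validator above point at the offending line. A minimal sketch (the config content here is made up for illustration):

```python
from io import StringIO
from ruamel.yaml import YAML

raw = "mqtt:\n  host: 127.0.0.1\ncameras: {}\n"

yaml = YAML()  # round-trip mode, preserves position info
config = yaml.load(StringIO(raw))

# Parsed mappings are CommentedMap objects whose .lc attribute exposes
# 0-based (line, column) pairs for each key and value.
line, col = config["mqtt"].lc.key("host")
print(f"mqtt.host is defined at line {line + 1}, column {col + 1}")
```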
Nicolas Mowen
6742363017 Update base image (#15103)
* Change base image

* Update python

* Update coral library

* Fix source file

* Install correct apt packages

* Cleanup

* Fix installation of coral deps

* fix python installations

* Fix devcontainer build

* Get tensorrt build working

* Update other deps

* Filter out tflite log

* Get ROCm build working

* Get rockchip build working

* Get hailo build working

* Add note to comment
2025-01-11 07:53:05 -07:00
Nicolas Mowen
7c4f36716e Face recognition fixes (#15222)
* Fix nginx max upload size

* Close upload dialog when done and add toasts

* Formatting

* fix ruff
2025-01-11 07:53:05 -07:00
Nicolas Mowen
304fc4d44b Improve face recognition (#15205)
* Validate faces using cosine distance and SVC

* Formatting

* Use opencv instead of face embedding

* Update docs for training data

* Adjust to score system

* Set bounds

* remove face embeddings

* Update writing images

* Add face library page

* Add ability to select file

* Install opencv deps

* Cleanup

* Use different deps

* Move deps

* Cleanup

* Only show face library for desktop

* Implement deleting

* Add ability to upload image

* Add support for uploading images
2025-01-11 07:53:05 -07:00
Nicolas Mowen
310af75c86 Remove standardization 2025-01-11 07:53:05 -07:00
Nicolas Mowen
bd6b2868d0 Fix check 2025-01-11 07:53:05 -07:00
Nicolas Mowen
d8cae0f597 Remove hardcoded face name 2025-01-11 07:53:05 -07:00
Nicolas Mowen
27a743ca96 Use SVC to normalize and classify faces for recognition (#14835)
* Add margin to detected faces for embeddings

* Standardize pixel values for face input

* Use SVC to classify faces

* Clear classifier when new face is added

* Formatting

* Add dependency
2025-01-11 07:53:05 -07:00
Josh Hawkins
c269b4c320 Use regular expressions for plate matching (#14727) 2025-01-11 07:53:05 -07:00
Nicolas Mowen
d56b5dac28 Update facenet model (#14647) 2025-01-11 07:53:05 -07:00
Josh Hawkins
15e01b67c9 LPR improvements (#14641) 2025-01-11 07:53:05 -07:00
Josh Hawkins
65a178e8d3 Prevent division by zero in lpr confidence checks (#14615) 2025-01-11 07:53:05 -07:00
Nicolas Mowen
81123f7aec Fix label check (#14610)
* Create config for parsing object

* Use in maintainer
2025-01-11 07:53:05 -07:00
Josh Hawkins
4e2b52db56 License plate recognition (ALPR) backend (#14564)
* Update version

* Face recognition backend (#14495)

* Add basic config and face recognition table

* Reconfigure updates processing to handle face

* Crop frame to face box

* Implement face embedding calculation

* Get matching face embeddings

* Add support for face recognition based on existing faces

* Use arcface face embeddings instead of generic embeddings model

* Add apis for managing faces

* Implement face uploading API

* Build out more APIs

* Add min area config

* Handle larger images

* Add more debug logs

* fix calculation

* Reduce timeout

* Small tweaks

* Use webp images

* Use facenet model

* Improve face recognition (#14537)

* Increase requirements for face to be set

* Manage faces properly

* Add basic docs

* Simplify

* Separate out face recognition from semantic search

* Update docs

* Formatting

* Fix access (#14540)

* Face detection (#14544)

* Add support for face detection

* Add support for detecting faces during registration

* Set body size to be larger

* Undo

* initial foundation for alpr with paddleocr

* initial foundation for alpr with paddleocr

* initial foundation for alpr with paddleocr

* config

* config

* lpr maintainer

* clean up

* clean up

* fix processing

* don't process for stationary cars

* fix order

* fixes

* check for known plates

* improved length and character by character confidence

* model fixes and small tweaks

* docs

* placeholder for non frigate+ model lp detection

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-01-11 07:53:05 -07:00
Nicolas Mowen
bb36b9ced6 Face detection (#14544)
* Add support for face detection

* Add support for detecting faces during registration

* Set body size to be larger

* Undo
2025-01-11 07:53:05 -07:00
Nicolas Mowen
a90619bff7 Fix access (#14540) 2025-01-11 07:53:04 -07:00
Nicolas Mowen
1ac7b30fb7 Improve face recognition (#14537)
* Increase requirements for face to be set

* Manage faces properly

* Add basic docs

* Simplify

* Separate out face recognition from semantic search

* Update docs

* Formatting
2025-01-11 07:53:04 -07:00
Nicolas Mowen
ac5bd15fc1 Face recognition backend (#14495)
* Add basic config and face recognition table

* Reconfigure updates processing to handle face

* Crop frame to face box

* Implement face embedding calculation

* Get matching face embeddings

* Add support for face recognition based on existing faces

* Use arcface face embeddings instead of generic embeddings model

* Add apis for managing faces

* Implement face uploading API

* Build out more APIs

* Add min area config

* Handle larger images

* Add more debug logs

* fix calculation

* Reduce timeout

* Small tweaks

* Use webp images

* Use facenet model
2025-01-11 07:53:04 -07:00
Nicolas Mowen
b380f94297 Update version 2025-01-11 07:53:04 -07:00
Nicolas Mowen
173b7aa308 Handle case where user has multiple manual events on same camera (#15943) 2025-01-11 07:47:45 -07:00
Blake Blackshear
c4727f19e1 Simplify plus submit (#15941)
* remove unused annotate file

* improve plus error messages

* formatting
2025-01-11 07:04:11 -07:00
Josh Hawkins
b8a74793ca Clarify motion recording (#15917)
* Clarify motion recording

* move to troubleshooting
2025-01-09 09:55:08 -07:00
Josh Hawkins
c1dede9369 Clarify reolink doorbell two way talk requirements (#15915)
* Clarify reolink doorbell two way talk requirements

* relative paths

* move to live section

* fix link
2025-01-09 09:31:16 -07:00
Nicolas Mowen
0c4ea504d8 Update proxmox docs to align with proxmox recommendation of running in VM. (#15904) 2025-01-08 17:19:04 -06:00
Nicolas Mowen
b265b6b190 Catch case where user has multiple of the same kind of GPU (#15903) 2025-01-08 17:17:57 -06:00
Nicolas Mowen
d57a61b50f Simplify model config (#15881)
* Add migration to migrate to model_path

* Simplify model config

* Cleanup docs

* Set config version

* Formatting

* Fix tests
2025-01-07 20:59:37 -07:00
Nicolas Mowen
4fc9106c17 Update for correct audio requirements (#15882) 2025-01-07 17:02:32 -06:00
Nicolas Mowen
38e098ca31 Remove extra data except from keypackets when using qsv (#15865) 2025-01-06 17:38:46 -06:00
Nicolas Mowen
e7ad38d827 Update model docs (#15779) 2025-01-02 10:04:16 -06:00
Josh Hawkins
a1ce9aacf2 Tracked object details pane bugfix (#15736)
* restore save button in tracked object details pane

* conditionally show save button
2024-12-30 08:23:25 -06:00
Nicolas Mowen
322b847356 Fix event cleanup (#15724) 2024-12-29 14:47:40 -06:00
Josh Hawkins
98338e4c7f Ensure object lifecycle ratio is re-normalized to camera aspect (#15717) 2024-12-28 13:37:39 -07:00
Josh Hawkins
171a89f37b Language consistency - use Explore instead of Search (#15709) 2024-12-27 17:38:43 -07:00
Josh Hawkins
8114b541a8 Sort camera group edit screen by ui config values (#15705) 2024-12-27 14:30:27 -06:00
Josh Hawkins
c48396c5c6 Fix crash when streams are undefined in go2rtc config password cleaning (#15695) 2024-12-27 08:36:21 -06:00
leccelecce
00371546a3 GenAI: add ability to save JPGs sent to provider (#15643)
* GenAI: add ability to save JPGs sent to provider

* Remove mention from GenAI docs

* Change config name to debug_save_thumbnails

* Change folder structure to clips/genai-requests/{event_id}/{1.jpg}
2024-12-23 07:05:34 -07:00
Nicolas Mowen
87e7b62c85 Remove duplicated rockchip build (#15641) 2024-12-22 13:31:14 -06:00
Nicolas Mowen
15ffe5c254 Fix trt (#15640) 2024-12-22 11:56:04 -07:00
Nicolas Mowen
a767dad3a1 Simplify TensorRT image (#15638) 2024-12-22 12:13:29 -06:00
Josh Hawkins
9387246f83 Add tooltips to ptz controls (#15633) 2024-12-21 17:57:22 -06:00
Nicolas Mowen
bed20de302 Update docs deps (#15617) 2024-12-20 10:37:02 -06:00
Nicolas Mowen
70fc5393b1 Make hailo wheels support any minor version (#15616) 2024-12-20 10:36:32 -06:00
dependabot[bot]
9b80dbe014 Bump actions/setup-python from 5.1.0 to 5.3.0 (#14584)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.0 to 5.3.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.1.0...v5.3.0)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-20 09:16:21 -07:00
Josh Hawkins
78a013d63a Add "frame" to shm frame names to avoid camera name issues (#15615) 2024-12-20 08:46:40 -06:00
Gabriel de Biasi
ddfe8f3921 Fix #7944: Adds tls_insecure to the onvif configuration (#15603)
* Adds tls_insecure to the onvif configuration

* reformat using ruff
2024-12-19 12:54:33 -07:00
Nicolas Mowen
4af752028f Bug Fixes (#15598)
* Catch onvif command error

* fix review item pre and post capture

* Include severity in query
2024-12-19 09:46:14 -06:00
Nicolas Mowen
b149828c9f Catch OS error (#15590) 2024-12-18 17:45:08 -06:00
Josh Hawkins
3dc26e78ef Genai descriptions are not generated until tracked objects end (#15561) 2024-12-17 17:33:04 -06:00
Giorgio Ughini
d9ef8fa206 Fix always the same image is sent to GenAI (#15550)
* Fix always the same image is sent to GenAI

* Fix typo for bug where identical images are sent to GenAI

* Correct formatting
2024-12-17 07:44:00 -06:00
Josh Hawkins
292499aebc Improve review message again (#15538) 2024-12-16 09:18:34 -07:00
148 changed files with 9267 additions and 2834 deletions

View File

@@ -2,6 +2,7 @@ aarch
absdiff
airockchip
Alloc
alpr
Amcrest
amdgpu
analyzeduration
@@ -61,6 +62,7 @@ dsize
dtype
ECONNRESET
edgetpu
facenet
fastapi
faststart
fflags
@@ -114,6 +116,8 @@ itemsize
Jellyfin
jetson
jetsons
jina
jinaai
joserfc
jsmpeg
jsonify
@@ -187,6 +191,7 @@ openai
opencv
openvino
OWASP
paddleocr
paho
passwordless
popleft
@@ -308,4 +313,4 @@ yolo
yolonas
yolox
zeep
zerolatency
zerolatency

View File

@@ -75,15 +75,6 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push Rockchip build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
name: Jetson Jetpack 4

View File

@@ -6,7 +6,7 @@ on:
- "docs/**"
env:
DEFAULT_PYTHON: 3.9
DEFAULT_PYTHON: 3.11
jobs:
build_devcontainer:
@@ -76,7 +76,7 @@ jobs:
with:
persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.1.0
uses: actions/setup-python@v5.3.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements

View File

@@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.15.0
VERSION = 0.16.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty

View File

@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
object_detector.cleanup()
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
######

View File

@@ -5,6 +5,7 @@ ARG DEBIAN_FRONTEND=noninteractive
# Build Python wheels
FROM wheels AS h8l-wheels
RUN python3 -m pip config set global.break-system-packages true
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt
@@ -30,6 +31,7 @@ COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /
# Install the wheels
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl

View File

@@ -2,7 +2,7 @@
set -euxo pipefail
hailo_version="4.19.0"
hailo_version="4.20.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
@@ -15,5 +15,5 @@ wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_ver
mkdir -p /hailo-wheels
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"

View File

@@ -1,12 +1,12 @@
appdirs==1.4.4
argcomplete==2.0.0
contextlib2==0.6.0.post1
distlib==0.3.6
filelock==3.8.0
future==0.18.2
importlib-metadata==5.1.0
importlib-resources==5.1.2
netaddr==0.8.0
netifaces==0.10.9
verboselogs==1.7
virtualenv==20.17.0
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*

View File

@@ -4,6 +4,7 @@
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget
hailo_version="4.20.0"
arch=$(uname -m)
if [[ $arch == "x86_64" ]]; then
@@ -13,7 +14,7 @@ else
fi
# Clone the HailoRT driver repository
git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
git clone --depth 1 --branch v${hailo_version} https://github.com/hailo-ai/hailort-drivers.git
# Build and install the HailoRT driver
cd hailort-drivers/linux/pcie

View File

@@ -3,12 +3,12 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG BASE_IMAGE=debian:11
ARG SLIM_BASE=debian:11-slim
ARG BASE_IMAGE=debian:12
ARG SLIM_BASE=debian:12-slim
FROM ${BASE_IMAGE} AS base
FROM --platform=${BUILDPLATFORM} debian:11 AS base_host
FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
FROM ${SLIM_BASE} AS slim-base
@@ -66,8 +66,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
RUN apt-get -qq update \
&& apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
&& wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip" \
&& pip install -r /requirements-ov.txt
&& python3 get-pip.py "pip" --break-system-packages \
&& pip install --break-system-packages -r /requirements-ov.txt
# Get OpenVino Model
RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@@ -139,24 +139,17 @@ ARG TARGETARCH
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y \
apt-transport-https \
gnupg \
wget \
# the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
&& wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
apt-transport-https wget \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3.9 \
python3.9-dev \
python3 \
python3-dev \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libssl-dev\
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# sqlite3 dependencies
tclsh \
@@ -164,14 +157,11 @@ RUN apt-get -qq update \
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip"
&& python3 get-pip.py "pip" --break-system-packages
COPY docker/main/requirements.txt /requirements.txt
RUN pip3 install -r /requirements.txt
RUN pip3 install -r /requirements.txt --break-system-packages
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@@ -222,8 +212,8 @@ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_de
/deps/install_deps.sh
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
python3 -m pip install --upgrade pip --break-system-packages && \
pip3 install -U /deps/wheels/*.whl --break-system-packages
COPY --from=deps-rootfs / /
@@ -270,7 +260,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt
pip3 install -r requirements-dev.txt --break-system-packages
HEALTHCHECK NONE

View File

@@ -8,8 +8,7 @@ SECURE_TOKEN_MODULE_VERSION="1.5"
SET_MISC_MODULE_VERSION="v0.33"
NGX_DEVEL_KIT_VERSION="v0.3.3"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
apt-get update
apt-get -yqq build-dep nginx

View File

@@ -4,7 +4,7 @@ from openvino.tools import mo
ov_model = mo.convert_model(
"/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb",
compress_to_fp16=True,
transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
transformations_config="/usr/local/lib/python3.11/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config",
reverse_input_channels=True,
)

View File

@@ -4,8 +4,7 @@ set -euxo pipefail
SQLITE_VEC_VERSION="0.1.3"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
apt-get update
apt-get -yqq build-dep sqlite3 gettext git

View File

@@ -11,33 +11,34 @@ apt-get -qq install --no-install-recommends -y \
lbzip2 \
procps vainfo \
unzip locales tzdata libxml2 xz-utils \
python3.9 \
python3 \
python3-pip \
curl \
lsof \
jq \
nethogs
# ensure python3 defaults to python3.9
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
nethogs \
libgl1 \
libglib2.0-0 \
libusb-1.0.0
mkdir -p -m 600 /root/.gnupg
# add coral repo
curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
gpg --dearmor -o /etc/apt/trusted.gpg.d/google-cloud-packages-archive-keyring.gpg
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list
echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
# install coral runtime
wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
unset DEBIAN_FRONTEND
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
rm /tmp/libedgetpu1-max.deb
# enable non-free repo in Debian
if grep -q "Debian" /etc/issue; then
sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
# install python3 & tflite runtime
if [[ "${TARGETARCH}" == "amd64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
fi
# coral drivers
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libedgetpu1-max python3-tflite-runtime python3-pycoral
if [[ "${TARGETARCH}" == "arm64" ]]; then
pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
fi
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
@@ -65,23 +66,15 @@ fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# use debian bookworm for amd / intel-i965 driver packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
# install amd / intel-i965 driver packages
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver intel-gpu-tools onevpl-tools \
libva-drm2 \
mesa-va-drivers radeontop
# something about this dependency requires it to be installed in a separate call rather than in the line above
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver-shaders
# intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use intel apt intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list

View File

@@ -10,10 +10,10 @@ imutils == 0.5.*
joserfc == 1.0.*
pathvalidate == 3.2.*
markupsafe == 2.1.*
python-multipart == 0.0.12
# General
mypy == 1.6.1
numpy == 1.26.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.9.0.*
onvif-zeep-async == 3.1.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
@@ -27,15 +27,19 @@ ruamel.yaml == 0.18.*
tzlocal == 5.2
requests == 2.32.*
types-requests == 2.32.*
scipy == 1.13.*
norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
# Image Manipulation
numpy == 1.26.*
opencv-python-headless == 4.10.0.*
opencv-contrib-python == 4.9.0.*
scipy == 1.14.*
# OpenVino & ONNX
openvino == 2024.3.*
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'
openvino == 2024.4.*
onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
onnxruntime == 1.20.* ; platform_machine == 'aarch64'
# Embeddings
transformers == 4.45.*
# Generative AI
@@ -45,3 +49,6 @@ openai == 1.51.*
# push notifications
py-vapid == 1.9.*
pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*

View File

@@ -1,2 +1,2 @@
scikit-build == 0.17.*
scikit-build == 0.18.*
nvidia-pyindex

View File

@@ -81,6 +81,9 @@ http {
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 10M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;

View File

@@ -0,0 +1,20 @@
./subset/000000005001.jpg
./subset/000000038829.jpg
./subset/000000052891.jpg
./subset/000000075612.jpg
./subset/000000098261.jpg
./subset/000000181542.jpg
./subset/000000215245.jpg
./subset/000000277005.jpg
./subset/000000288685.jpg
./subset/000000301421.jpg
./subset/000000334371.jpg
./subset/000000348481.jpg
./subset/000000373353.jpg
./subset/000000397681.jpg
./subset/000000414673.jpg
./subset/000000419312.jpg
./subset/000000465822.jpg
./subset/000000475732.jpg
./subset/000000559707.jpg
./subset/000000574315.jpg

[20 binary image files added (COCO subset JPEGs, 14 KiB – 281 KiB); previews not shown]
View File

@@ -7,21 +7,26 @@ FROM wheels as rk-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
RUN python3 -m pip config set global.break-system-packages true
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
RUN rm -rf /rk-wheels/opencv_python-*
FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install -U /deps/rk-wheels/*.whl
pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rockchip/COCO /COCO
COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@@ -0,0 +1,82 @@
import os

import rknn
import yaml
from rknn.api import RKNN

# Read the installed rknn-toolkit2 version for use in output file names.
try:
    with open(rknn.__path__[0] + "/VERSION") as file:
        tk_version = file.read().strip()
except FileNotFoundError:
    pass

try:
    with open("/config/conv2rknn.yaml", "r") as config_file:
        configuration = yaml.safe_load(config_file)
except FileNotFoundError:
    raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")

# Use .get() so a missing "config" key falls back to an empty dict
# instead of raising a KeyError.
if configuration.get("config") is not None:
    rknn_config = configuration["config"]
else:
    rknn_config = {}

if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
    raise Exception(
        "Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
    )

# If no target SoC is configured, detect it from the device tree.
if "soc" not in configuration:
    try:
        with open("/proc/device-tree/compatible") as file:
            soc = file.read().split(",")[-1].strip("\x00")
    except FileNotFoundError:
        raise Exception("Make sure to run docker in privileged mode.")

    configuration["soc"] = [soc]

if "quantization" not in configuration:
    configuration["quantization"] = False

if "output_name" not in configuration:
    configuration["output_name"] = "{input_basename}"

# Convert every onnx model once per requested SoC.
for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
    for soc in configuration["soc"]:
        quant = "i8" if configuration["quantization"] else "fp16"

        input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
        input_basename = input_filename[: input_filename.rfind(".")]

        output_filename = (
            configuration["output_name"].format(
                quant=quant,
                input_basename=input_basename,
                soc=soc,
                tk_version=tk_version,
            )
            + ".rknn"
        )
        output_path = "/config/model_cache/rknn_cache/" + output_filename

        rknn_config["target_platform"] = soc

        rknn = RKNN(verbose=True)
        rknn.config(**rknn_config)

        if rknn.load_onnx(model=input_path) != 0:
            raise Exception("Error loading model.")

        if (
            rknn.build(
                do_quantization=configuration["quantization"],
                dataset="/COCO/coco_subset_20.txt",
            )
            != 0
        ):
            raise Exception("Error building model.")

        if rknn.export_rknn(output_path) != 0:
            raise Exception("Error exporting rknn model.")

View File

@@ -1 +1,2 @@
rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl
rknn-toolkit2 == 2.3.0
rknn-toolkit-lite2 == 2.3.0

View File

@@ -34,7 +34,7 @@ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:11 as debian-base
FROM --platform=linux/amd64 debian:12 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
@@ -51,7 +51,7 @@ COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev
RUN apt-get -y install python3-pybind11 python3-distutils python3-dev
WORKDIR /opt/build
@@ -70,10 +70,11 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
# Temporarily disabled to see if a new wheel can be built to support py3.11
#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
#RUN python3 -m pip install --upgrade pip \
# && pip3 uninstall -y onnxruntime-openvino \
# && pip3 install -r /requirements.txt
#######################################################################
FROM scratch AS rocm-dist
@@ -86,12 +87,12 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
#######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0
ENV HSA_ENABLE_SDMA=0
ENV HSA_ENABLE_SDMA=0
COPY --from=rocm-dist / /

View File

@@ -24,7 +24,7 @@ sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
if [[ "${TARGETARCH}" == "arm64" ]]; then
# add raspberry pi repo
gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
fi

View File

@@ -7,33 +7,19 @@ ARG DEBIAN_FRONTEND=noninteractive
FROM wheels as trt-wheels
ARG DEBIAN_FRONTEND
ARG TARGETARCH
RUN python3 -m pip config set global.break-system-packages true
# Add TensorRT wheels to another folder
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
# Build CuDNN
FROM wget AS cudnn-deps
ARG COMPUTE_LEVEL
RUN apt-get update \
&& apt-get install -y git build-essential
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
&& dpkg -i cuda-keyring_1.1-1_all.deb \
&& apt-get update \
&& apt-get -y install cuda-toolkit \
&& rm -rf /var/lib/apt/lists/*
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
ENV TRT_VER=8.6.1
RUN python3 -m pip config set global.break-system-packages true
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@@ -42,8 +28,8 @@ FROM devcontainer AS devcontainer-trt
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl
pip3 install -U /deps/trt-wheels/*.whl --break-system-packages

View File

@@ -41,11 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND

View File

@@ -3,7 +3,7 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
# Build TensorRT-specific library
FROM ${TRT_BASE} AS trt-deps
@@ -24,6 +24,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS=""

View File

@@ -1,6 +1,8 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.9/dist-packages/tensorrt
/usr/local/cuda/lib64
/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
/usr/local/lib/python3.11/dist-packages/tensorrt
/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib

View File

@@ -1,9 +1,9 @@
# NVidia TensorRT Support (amd64 only)
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
tensorrt == 8.5.3.*; platform_machine == 'x86_64'
cuda-python == 11.8; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
tensorrt == 8.6.1.*; platform_machine == 'x86_64'
cuda-python == 11.8.*; platform_machine == 'x86_64'
cython == 3.0.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'

View File

@@ -41,6 +41,7 @@ cameras:
...
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -49,6 +50,8 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:

View File

@@ -67,14 +67,15 @@ ffmpeg:
### Annke C800
This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback of in the browser is not working (yet), but the downloaded clip can be played locally.
This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone) the H.265 stream has to be adjusted using the `apple_compatibility` config.
```yaml
cameras:
annkec800: # <------ Name the camera
ffmpeg:
apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
output_args:
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
record: preset-record-generic-audio-aac
inputs:
- path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
@@ -156,7 +157,9 @@ cameras:
#### Reolink Doorbell
The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
```yaml
go2rtc:

View File

@@ -0,0 +1,35 @@
---
id: face_recognition
title: Face Recognition
---
Face recognition allows people to be assigned names; when their face is recognized, Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.
Frigate supports FaceNet, which runs locally, to create face embeddings. Embeddings are then saved to Frigate's database.
## Minimum System Requirements
Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
## Configuration
Face recognition is disabled by default and requires Semantic Search to be enabled. Face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings.
```yaml
face_recognition:
enabled: true
```
## Dataset
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
- Complexity of the task: A simple task like recognizing faces of known individuals may require fewer images than a complex task like identifying unknown individuals in a large crowd.
- Diversity of the dataset: A dataset with diverse images, including variations in lighting, pose, and facial expressions, will require fewer images per person than a less diverse dataset.
- Desired accuracy: The higher the desired accuracy, the more images are typically needed.
However, here are some general guidelines:
- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
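
Several commits in this compare ("Improve face recognition (#15205)", "Use SVC to normalize and classify faces for recognition (#14835)") describe classifying face embeddings with an SVC rather than raw embedding distance. A minimal sketch of that idea, assuming 512-d FaceNet embeddings and using `known_embeddings`/`known_names` as stand-ins for whatever Frigate loads from its database:

```python
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.svm import SVC

# Placeholder training data: one 512-d FaceNet embedding per face image,
# labeled with the person's name.
known_embeddings = np.random.rand(40, 512)
known_names = ["alice"] * 20 + ["bob"] * 20

# L2-normalizing the embeddings first makes distances between them
# behave like cosine distance.
clf = SVC(kernel="linear", probability=True)
clf.fit(normalize(known_embeddings), known_names)

def classify_face(embedding: np.ndarray, min_score: float = 0.8):
    """Return (name, score), or (None, score) when confidence is too low."""
    probs = clf.predict_proba(normalize(embedding.reshape(1, -1)))[0]
    best = int(np.argmax(probs))
    name, score = str(clf.classes_[best]), float(probs[best])
    return (name, score) if score >= min_score else (None, score)

print(classify_face(np.random.rand(512)))
```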

View File

@@ -5,6 +5,8 @@ title: Generative AI
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
:::info
Semantic Search must be enabled to use Generative AI.

View File

@@ -175,6 +175,16 @@ For more information on the various values across different distributions, see h
Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
#### Stats for SR-IOV devices
When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
```yaml
telemetry:
stats:
sriov: True
```
## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

View File

@@ -203,14 +203,13 @@ detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:

View File

@@ -0,0 +1,45 @@
---
id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
Users running a Frigate+ model should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
LPR is most effective when the vehicle's license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining its detection and keeping the most confident result. LPR will not run on stationary vehicles.
## Minimum System Requirements
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required.
## Configuration
License plate recognition is disabled by default. Enable it in your config file:
```yaml
lpr:
enabled: true
```
## Advanced Configuration
Several options are available to fine-tune the LPR feature. For example, you can adjust the `min_area` setting, which defines the minimum size in pixels a license plate must be before LPR runs. The default is 500 pixels.
Additionally, you can define `known_plates` as strings or regular expressions, allowing Frigate to label tracked vehicles with custom sub_labels when a recognized plate is detected. This information is then accessible in the UI, filters, and notifications.
```yaml
lpr:
enabled: true
min_area: 500
known_plates:
Wife's Car:
- "ABC-1234"
- "ABC-I234"
Johnny:
- "J*N-*234" # Using wildcards for H/M and 1/I
Sally:
- "[S5]LL-1234" # Matches SLL-1234 and 5LL-1234
```
In this example, "Wife's Car" will appear as the label for any vehicle matching the plate "ABC-1234." The model might occasionally interpret the digit 1 as a capital I (e.g., "ABC-I234"), so both variations are listed. Similarly, multiple possible variations are specified for Johnny and Sally.
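
Since plate entries are matched as regular expressions (per the "Use regular expressions for plate matching" commit in this compare), the lookup amounts to testing the detected plate string against each configured pattern. A rough Python sketch with `match_plate` as a hypothetical helper, assuming whole-string matching; Frigate's actual matching logic may differ in details:

```python
import re

# The known_plates mapping from the YAML above, as Python data.
known_plates = {
    "Wife's Car": ["ABC-1234", "ABC-I234"],
    "Johnny": ["J*N-*234"],
    "Sally": ["[S5]LL-1234"],
}

def match_plate(detected: str) -> str | None:
    """Return the sub_label for the first pattern that matches the plate."""
    for name, patterns in known_plates.items():
        for pattern in patterns:
            # Each entry is treated as a regular expression that must match
            # the whole plate; plain strings like "ABC-1234" match themselves.
            if re.fullmatch(pattern, detected):
                return name
    return None

print(match_plate("5LL-1234"))  # -> "Sally"
```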

View File

@@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi
### Audio Support
MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
@@ -138,3 +138,13 @@ services:
:::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
### Two way talk
For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell).

View File

@@ -144,7 +144,9 @@ detectors:
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
@@ -254,6 +256,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@@ -282,6 +285,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
@@ -501,11 +506,12 @@ detectors:
cpu1:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@@ -544,7 +550,7 @@ Hardware accelerated object detection is supported on the following SoCs:
- RK3576
- RK3588
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.0.0.beta0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
### Prerequisites
@@ -617,7 +623,41 @@ $ cat /sys/kernel/debug/rknpu/load
:::
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires a x86 machine). Note, that there is only post-processing for the supported models.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see the `rknn-toolkit2`. Note that there is only post-processing for the supported models.
### Converting your own onnx model to rknn format
To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.
This is an example configuration file that you need to adjust to your specific onnx model:
```yaml
soc: ["rk3562","rk3566", "rk3568", "rk3576", "rk3588"]
quantization: false
output_name: "{input_basename}"
config:
mean_values: [[0, 0, 0]]
std_values: [[255, 255, 255]]
quant_img_rgb2bgr: true
```
Explanation of the parameters:
- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
- `quantization`: true enables 8 bit integer (i8) quantization; false uses 16 bit float (fp16). Default: false.
- `output_name`: The output name of the model. The following variables are available:
- `quant`: "i8" or "fp16" depending on the config
- `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
- `soc`: the SoC this model was built for (e.g. "rk3588")
- `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).
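Once converted, the model can be referenced from the detector section of your Frigate config. The following is a minimal sketch, not an authoritative example: the file name assumes the `output_name` shown above, and `num_cores: 3` is an assumption for an RK3588 and should match your SoC:
```yaml
detectors:
  rknn:
    type: rknn
    # assumption: use all three NPU cores of an RK3588
    num_cores: 3

model:
  # hypothetical file produced by the output_name example above
  path: /config/model_cache/rknn_cache/frigate-i8-my_model-rk3588-v2.3.0.rknn
```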
## Hailo-8l
@@ -632,8 +672,6 @@ detectors:
hailo8l:
type: hailo8l
device: PCIe
model:
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
model:
width: 300
@@ -641,4 +679,5 @@ model:
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```


@@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@@ -117,25 +117,27 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Optional: path to the model (default: automatic based on detector)
# Required: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Optional: path to the labelmap (default: shown below)
# Required: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Optional: Object detection model input colorspace
# Required: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Optional: Object detection model input tensor format
# Required: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Required: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
# Required: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)
@@ -242,6 +244,8 @@ ffmpeg:
# If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
# NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
retry_interval: 10
# Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
apple_compatibility: false
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@@ -522,6 +526,14 @@ semantic_search:
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for face recognition capability
face_recognition:
# Optional: Enable face recognition (default: shown below)
enabled: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
@@ -686,6 +698,7 @@ cameras:
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -694,6 +707,8 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
ignore_time_mismatch: False
@@ -757,6 +772,8 @@ cameras:
- cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: []
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False
# Optional
ui:
@@ -798,11 +815,13 @@ telemetry:
- lo
# Optional: Configure system stats
stats:
# Enable AMD GPU stats (default: shown below)
# Optional: Enable AMD GPU stats (default: shown below)
amd_gpu_stats: True
# Enable Intel GPU stats (default: shown below)
# Optional: Enable Intel GPU stats (default: shown below)
intel_gpu_stats: True
# Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
sriov: False
# Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)


@@ -305,8 +305,15 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
:::warning
If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation. Frigate does not officially support running inside of an LXC.
:::
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`


@@ -3,7 +3,15 @@ id: recordings
title: Troubleshooting Recordings
---
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
You'll want to:
- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
@@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern Linux kernels, the system will utilize some swap if it is enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
@@ -54,6 +63,7 @@ services:
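  frigate:
    # a sketch: equal memory and memory+swap limits plus zero swappiness
    # disable swapping; "4g" is a placeholder, size it for your system
    mem_limit: 4g
    memswap_limit: 4g
    mem_swappiness: 0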
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```

docs/package-lock.json (generated, 7069 changed lines): diff suppressed because it is too large.


@@ -17,15 +17,15 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.5.2",
"@docusaurus/preset-classic": "^3.5.2",
"@docusaurus/theme-mermaid": "^3.5.2",
"@docusaurus/plugin-content-docs": "^3.5.2",
"@mdx-js/react": "^3.0.1",
"@docusaurus/core": "^3.6.3",
"@docusaurus/preset-classic": "^3.6.3",
"@docusaurus/theme-mermaid": "^3.6.3",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@mdx-js/react": "^3.1.0",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.1.0",
"docusaurus-theme-openapi-docs": "^4.1.0",
"prism-react-renderer": "^2.4.0",
"docusaurus-plugin-openapi-docs": "^4.3.1",
"docusaurus-theme-openapi-docs": "^4.3.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
"react-dom": "^18.3.1"


@@ -36,6 +36,8 @@ const sidebars: SidebarsConfig = {
'Semantic Search': [
'configuration/semantic_search',
'configuration/genai',
'configuration/face_recognition',
'configuration/license_plate_recognition',
],
Cameras: [
'configuration/cameras',


@@ -3,12 +3,15 @@ import faulthandler
import signal
import sys
import threading
from typing import Union
import ruamel.yaml
from pydantic import ValidationError
from frigate.app import FrigateApp
from frigate.config import FrigateConfig
from frigate.log import setup_logging
from frigate.util.config import find_config_file
def main() -> None:
@@ -42,10 +45,51 @@ def main() -> None:
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
print("*************************************************************\n")
# Attempt to get the original config file for line number tracking
config_path = find_config_file()
with open(config_path, "r") as f:
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(f)
for error in e.errors():
location = ".".join(str(item) for item in error["loc"])
print(f"{location}: {error['msg']}")
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
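                # walk the error path down the ruamel-parsed YAML, tracking the
                # most recent node that exposes line info via its .lc attribute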
for i, part in enumerate(error_path):
key: Union[int, str] = (
int(part) if isinstance(part, str) and part.isdigit() else part
)
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
if isinstance(key, int):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception as traverse_error:
print(f"Could not determine exact line number: {traverse_error}")
if current != full_config:
print(f"Line # : {line_number}")
print(f"Key : {' -> '.join(map(str, error_path))}")
print(f"Value : {error.get('input', '-')}")
print(f"Message : {error.get('msg', error.get('type', 'Unknown'))}\n")
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")


@@ -7,15 +7,18 @@ import os
import traceback
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO
from typing import Any, Optional
import requests
import ruamel.yaml
from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse
from markupsafe import escape
from peewee import operator
from pydantic import ValidationError
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
@@ -139,6 +142,8 @@ def config(request: Request):
mode="json", warnings="none", exclude_none=True
)
for stream_name, stream in go2rtc.get("streams", {}).items():
if stream is None:
continue
if isinstance(stream, str):
cleaned = clean_camera_user_pass(stream)
else:
@@ -183,7 +188,6 @@ def config_raw():
@router.post("/config/save")
def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
new_config = body.decode()
if not new_config:
return JSONResponse(
content=(
@@ -194,13 +198,64 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
# Validate the config schema
try:
# Use ruamel to parse and preserve line numbers
yaml_config = ruamel.yaml.YAML()
yaml_config.preserve_quotes = True
full_config = yaml_config.load(StringIO(new_config))
FrigateConfig.parse_yaml(new_config)
except ValidationError as e:
error_message = []
for error in e.errors():
error_path = error["loc"]
current = full_config
line_number = "Unknown"
last_line_number = "Unknown"
try:
for i, part in enumerate(error_path):
                    key = int(part) if isinstance(part, str) and part.isdigit() else part
if isinstance(current, ruamel.yaml.comments.CommentedMap):
current = current[key]
elif isinstance(current, list):
current = current[key]
if hasattr(current, "lc"):
last_line_number = current.lc.line
if i == len(error_path) - 1:
if hasattr(current, "lc"):
line_number = current.lc.line
else:
line_number = last_line_number
except Exception:
line_number = "Unable to determine"
error_message.append(
f"Line {line_number}: {' -> '.join(map(str, error_path))} - {error.get('msg', error.get('type', 'Unknown'))}"
)
return JSONResponse(
content=(
{
"success": False,
"message": "Your configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n"
+ "\n".join(error_message),
}
),
status_code=400,
)
except Exception:
return JSONResponse(
content=(
{
"success": False,
"message": f"\nConfig Error:\n\n{escape(str(traceback.format_exc()))}",
"message": f"\nYour configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n{escape(str(traceback.format_exc()))}",
}
),
status_code=400,


@@ -0,0 +1,127 @@
"""Object classification APIs."""
import logging
import os
import random
import shutil
import string
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
from frigate.api.defs.tags import Tags
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.events])
@router.get("/faces")
def get_faces():
face_dict: dict[str, list[str]] = {}
for name in os.listdir(FACE_DIR):
face_dir = os.path.join(FACE_DIR, name)
if not os.path.isdir(face_dir):
continue
face_dict[name] = []
for file in sorted(
os.listdir(face_dir),
key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
reverse=True,
):
face_dict[name].append(file)
return JSONResponse(status_code=200, content=face_dict)
@router.post("/faces/{name}")
async def register_face(request: Request, name: str, file: UploadFile):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
context: EmbeddingsContext = request.app.embeddings
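    # hand the uploaded image bytes to the embeddings process, which performs
    # the actual face registration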
result = context.register_face(name, await file.read())
return JSONResponse(
status_code=200 if result.get("success", True) else 400,
content=result,
)
@router.post("/faces/train/{name}/classify")
def train_face(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, any] = body or {}
training_file = os.path.join(
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
)
if not training_file or not os.path.isfile(training_file):
return JSONResponse(
content=(
{
"success": False,
"message": f"Invalid filename or no file exists: {training_file}",
}
),
status_code=404,
)
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
new_name = f"{name}-{rand_id}.webp"
new_file = os.path.join(FACE_DIR, f"{name}/{new_name}")
shutil.move(training_file, new_file)
context: EmbeddingsContext = request.app.embeddings
context.clear_face_classifier()
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully saved {training_file} as {new_name}.",
}
),
status_code=200,
)
@router.post("/faces/{name}/delete")
def deregister_faces(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse(
status_code=400,
content={"message": "Face recognition is not enabled.", "success": False},
)
json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
content=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
context: EmbeddingsContext = request.app.embeddings
context.delete_face_ids(
name, map(lambda file: sanitize_filename(file), list_of_ids)
)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)


@@ -20,6 +20,7 @@ class MediaLatestFrameQueryParams(BaseModel):
regions: Optional[int] = None
quality: Optional[int] = 70
height: Optional[int] = None
store: Optional[int] = None
class MediaEventsSnapshotQueryParams(BaseModel):


@@ -8,6 +8,9 @@ class EventsSubLabelBody(BaseModel):
subLabelScore: Optional[float] = Field(
title="Score for sub label", default=None, gt=0.0, le=1.0
)
camera: Optional[str] = Field(
title="Camera this object is detected on.", default=None
)
class EventsDescriptionBody(BaseModel):


@@ -10,4 +10,5 @@ class Tags(Enum):
review = "Review"
export = "Export"
events = "Events"
classification = "classification"
auth = "Auth"


@@ -909,38 +909,59 @@ def set_sub_label(
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
if not body.camera:
return JSONResponse(
content=(
{
"success": False,
"message": "Event "
+ event_id
+ " not found and camera is not provided.",
}
),
status_code=404,
)
event = None
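    # the event may not be in the database (or may not exist yet); fall back to
    # the in-memory tracked object, using the camera supplied in the request body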
if request.app.detected_frames_processor:
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera if event else body.camera
].tracked_objects.get(event_id)
)
else:
tracked_obj = None
if not event and not tracked_obj:
return JSONResponse(
content=({"success": False, "message": "Event " + event_id + " not found"}),
content=(
{"success": False, "message": "Event " + event_id + " not found."}
),
status_code=404,
)
new_sub_label = body.subLabel
new_score = body.subLabelScore
if not event.end_time:
# update tracked object
tracked_obj: TrackedObject = (
request.app.detected_frames_processor.camera_states[
event.camera
].tracked_objects.get(event.id)
)
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
).where(Timeline.source_id == event_id).execute()
event.sub_label = new_sub_label
if event:
event.sub_label = new_sub_label
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
if new_score:
data = event.data
data["sub_label_score"] = new_score
event.data = data
event.save()
event.save()
return JSONResponse(
content=(
{


@@ -11,7 +11,16 @@ from starlette_context import middleware, plugins
from starlette_context.plugins import Plugin
from frigate.api import app as main_app
from frigate.api import auth, event, export, media, notification, preview, review
from frigate.api import (
auth,
classification,
event,
export,
media,
notification,
preview,
review,
)
from frigate.api.auth import get_jwt_secret, limiter
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
@@ -99,6 +108,7 @@ def create_fastapi_app(
# Routes
# Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
app.include_router(auth.router)
app.include_router(classification.router)
app.include_router(review.router)
app.include_router(main_app.router)
app.include_router(preview.router)


@@ -179,7 +179,12 @@ def latest_frame(
return Response(
content=img.tobytes(),
media_type=f"image/{extension}",
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
headers={
"Content-Type": f"image/{extension}",
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
)
elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
frame = cv2.cvtColor(
@@ -198,7 +203,12 @@ def latest_frame(
return Response(
content=img.tobytes(),
media_type=f"image/{extension}",
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
headers={
"Content-Type": f"image/{extension}",
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
)
else:
return JSONResponse(


@@ -34,10 +34,12 @@ from frigate.const import (
CLIPS_DIR,
CONFIG_DIR,
EXPORT_DIR,
FACE_DIR,
MODEL_CACHE_DIR,
RECORD_DIR,
SHM_FRAMES_VAR,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.events.audio import AudioProcessor
@@ -88,6 +90,9 @@ class FrigateApp:
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.camera_metrics: dict[str, CameraMetrics] = {}
self.embeddings_metrics: DataProcessorMetrics | None = (
DataProcessorMetrics() if config.semantic_search.enabled else None
)
self.ptz_metrics: dict[str, PTZMetrics] = {}
self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None
@@ -96,14 +101,19 @@ class FrigateApp:
self.config = config
def ensure_dirs(self) -> None:
for d in [
dirs = [
CONFIG_DIR,
RECORD_DIR,
f"{CLIPS_DIR}/cache",
CACHE_DIR,
MODEL_CACHE_DIR,
EXPORT_DIR,
]:
]
if self.config.face_recognition.enabled:
dirs.append(FACE_DIR)
for d in dirs:
if not os.path.exists(d) and not os.path.islink(d):
logger.info(f"Creating directory: {d}")
os.makedirs(d)
@@ -229,7 +239,10 @@ class FrigateApp:
embedding_process = util.Process(
target=manage_embeddings,
name="embeddings_manager",
args=(self.config,),
args=(
self.config,
self.embeddings_metrics,
),
)
embedding_process.daemon = True
self.embedding_process = embedding_process
@@ -437,7 +450,7 @@ class FrigateApp:
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_{i}", frame_size)
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
capture_process = util.Process(
target=capture_camera,
@@ -491,7 +504,11 @@ class FrigateApp:
self.stats_emitter = StatsEmitter(
self.config,
stats_init(
self.config, self.camera_metrics, self.detectors, self.processes
self.config,
self.camera_metrics,
self.embeddings_metrics,
self.detectors,
self.processes,
),
self.stop_event,
)


@@ -0,0 +1,130 @@
"""Manage camera activity and updating listeners."""
from collections import Counter
from typing import Callable
from frigate.config.config import FrigateConfig
class CameraActivityManager:
def __init__(
self, config: FrigateConfig, publish: Callable[[str, any], None]
) -> None:
self.config = config
self.publish = publish
self.last_camera_activity: dict[str, dict[str, any]] = {}
self.camera_all_object_counts: dict[str, Counter] = {}
self.camera_active_object_counts: dict[str, Counter] = {}
self.zone_all_object_counts: dict[str, Counter] = {}
self.zone_active_object_counts: dict[str, Counter] = {}
self.all_zone_labels: dict[str, set[str]] = {}
for camera_config in config.cameras.values():
if not camera_config.enabled:
continue
self.last_camera_activity[camera_config.name] = {}
self.camera_all_object_counts[camera_config.name] = Counter()
self.camera_active_object_counts[camera_config.name] = Counter()
for zone, zone_config in camera_config.zones.items():
if zone not in self.all_zone_labels:
self.zone_all_object_counts[zone] = Counter()
self.zone_active_object_counts[zone] = Counter()
self.all_zone_labels[zone] = set()
self.all_zone_labels[zone].update(zone_config.objects)
def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
all_objects: list[dict[str, any]] = []
for camera in new_activity.keys():
new_objects = new_activity[camera].get("objects", [])
all_objects.extend(new_objects)
if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
self.compare_camera_activity(camera, new_objects)
# run through every zone, getting a count of objects in that zone right now
for zone, labels in self.all_zone_labels.items():
all_zone_objects = Counter(
obj["label"].replace("-verified", "")
for obj in all_objects
if zone in obj["current_zones"]
)
active_zone_objects = Counter(
obj["label"].replace("-verified", "")
for obj in all_objects
if zone in obj["current_zones"] and not obj["stationary"]
)
any_changed = False
# run through each object and check what topics need to be updated for this zone
for label in labels:
new_count = all_zone_objects[label]
new_active_count = active_zone_objects[label]
if (
new_count != self.zone_all_object_counts[zone][label]
or label not in self.zone_all_object_counts[zone]
):
any_changed = True
self.publish(f"{zone}/{label}", new_count)
self.zone_all_object_counts[zone][label] = new_count
if (
new_active_count != self.zone_active_object_counts[zone][label]
or label not in self.zone_active_object_counts[zone]
):
any_changed = True
self.publish(f"{zone}/{label}/active", new_active_count)
self.zone_active_object_counts[zone][label] = new_active_count
if any_changed:
self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
self.publish(
f"{zone}/all/active", sum(list(active_zone_objects.values()))
)
self.last_camera_activity = new_activity
def compare_camera_activity(
self, camera: str, new_activity: dict[str, any]
) -> None:
all_objects = Counter(
obj["label"].replace("-verified", "") for obj in new_activity
)
active_objects = Counter(
obj["label"].replace("-verified", "")
for obj in new_activity
if not obj["stationary"]
)
any_changed = False
# run through each object and check what topics need to be updated
for label in self.config.cameras[camera].objects.track:
if label in self.config.model.all_attributes:
continue
new_count = all_objects[label]
new_active_count = active_objects[label]
if (
new_count != self.camera_all_object_counts[camera][label]
or label not in self.camera_all_object_counts[camera]
):
any_changed = True
self.publish(f"{camera}/{label}", new_count)
self.camera_all_object_counts[camera][label] = new_count
if (
new_active_count != self.camera_active_object_counts[camera][label]
or label not in self.camera_active_object_counts[camera]
):
any_changed = True
self.publish(f"{camera}/{label}/active", new_active_count)
self.camera_active_object_counts[camera][label] = new_active_count
if any_changed:
self.publish(f"{camera}/all", sum(list(all_objects.values())))
self.publish(f"{camera}/all/active", sum(list(active_objects.values())))


@@ -7,6 +7,7 @@ from abc import ABC, abstractmethod
from typing import Any, Callable, Optional
from frigate.camera import PTZMetrics
from frigate.camera.activity_manager import CameraActivityManager
from frigate.comms.config_updater import ConfigPublisher
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import (
@@ -64,7 +65,7 @@ class Dispatcher:
self.onvif = onvif
self.ptz_metrics = ptz_metrics
self.comms = communicators
self.camera_activity = {}
self.camera_activity = CameraActivityManager(config, self.publish)
self.model_state = {}
self.embeddings_reindex = {}
@@ -130,7 +131,7 @@ class Dispatcher:
).execute()
def handle_update_camera_activity():
self.camera_activity = payload
self.camera_activity.update_activity(payload)
def handle_update_event_description():
event: Event = Event.get(Event.id == payload["id"])
@@ -171,7 +172,7 @@ class Dispatcher:
)
def handle_on_connect():
camera_status = self.camera_activity.copy()
camera_status = self.camera_activity.last_camera_activity.copy()
for camera in camera_status.keys():
camera_status[camera]["config"] = {


@@ -9,9 +9,11 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"
class EmbeddingsRequestEnum(Enum):
clear_face_classifier = "clear_face_classifier"
embed_description = "embed_description"
embed_thumbnail = "embed_thumbnail"
generate_search = "generate_search"
register_face = "register_face"
class EmbeddingsResponder:
@@ -22,7 +24,7 @@ class EmbeddingsResponder:
def check_for_request(self, process: Callable) -> None:
while True: # load all messages that are queued
has_message, _, _ = zmq.select([self.socket], [], [], 0.1)
has_message, _, _ = zmq.select([self.socket], [], [], 0.01)
if not has_message:
break


@@ -151,7 +151,7 @@ class WebPushClient(Communicator): # type: ignore[misc]
camera: str = payload["after"]["camera"]
title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
message = f"Detected on {camera.replace('_', ' ').title()}"
image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"
# if event is ongoing open to live view otherwise open to recordings view
direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"


@@ -3,13 +3,13 @@ from frigate.detectors import DetectorConfig, ModelConfig # noqa: F401
from .auth import * # noqa: F403
from .camera import * # noqa: F403
from .camera_group import * # noqa: F403
from .classification import * # noqa: F403
from .config import * # noqa: F403
from .database import * # noqa: F403
from .logger import * # noqa: F403
from .mqtt import * # noqa: F403
from .notification import * # noqa: F403
from .proxy import * # noqa: F403
from .semantic_search import * # noqa: F403
from .telemetry import * # noqa: F403
from .tls import * # noqa: F403
from .ui import * # noqa: F403


@@ -167,7 +167,7 @@ class CameraConfig(FrigateBaseModel):
record_args = get_ffmpeg_arg_list(
parse_preset_output_record(
self.ffmpeg.output_args.record,
self.ffmpeg.output_args._force_record_hvc1,
self.ffmpeg.apple_compatibility,
)
or self.ffmpeg.output_args.record
)


@@ -2,7 +2,7 @@ import shutil
from enum import Enum
from typing import Union
from pydantic import Field, PrivateAttr, field_validator
from pydantic import Field, field_validator
from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS
@@ -42,7 +42,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
)
_force_record_hvc1: bool = PrivateAttr(default=False)
class FfmpegConfig(FrigateBaseModel):
@@ -64,6 +63,10 @@ class FfmpegConfig(FrigateBaseModel):
default=10.0,
title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
)
apple_compatibility: bool = Field(
default=False,
title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
)
@property
def ffmpeg_path(self) -> str:


@@ -38,6 +38,10 @@ class GenAICameraConfig(BaseModel):
default_factory=list,
title="List of required zones to be entered in order to run generative AI.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
)
@field_validator("required_zones", mode="before")
@classmethod


@@ -1,6 +1,6 @@
from typing import Any, Optional, Union
from pydantic import Field, field_serializer
from pydantic import Field, PrivateAttr, field_serializer
from ..base import FrigateBaseModel
@@ -53,3 +53,20 @@ class ObjectConfig(FrigateBaseModel):
default_factory=dict, title="Object filters."
)
mask: Union[str, list[str]] = Field(default="", title="Object mask.")
_all_objects: list[str] = PrivateAttr()
@property
def all_objects(self) -> list[str]:
return self._all_objects
def parse_all_objects(self, cameras):
if "_all_objects" in self:
return
# get list of unique enabled labels for tracking
enabled_labels = set(self.track)
for camera in cameras.values():
enabled_labels.update(camera.objects.track)
self._all_objects = list(enabled_labels)


@@ -74,6 +74,7 @@ class OnvifConfig(FrigateBaseModel):
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",


@@ -4,6 +4,7 @@ from typing import Optional
from pydantic import Field
from frigate.const import MAX_PRE_CAPTURE
from frigate.review.types import SeverityEnum
from ..base import FrigateBaseModel
@@ -101,3 +102,15 @@ class RecordConfig(FrigateBaseModel):
self.alerts.pre_capture,
self.detections.pre_capture,
)
def get_review_pre_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.pre_capture
else:
return self.detections.pre_capture
def get_review_post_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.post_capture
else:
return self.detections.post_capture


@@ -85,7 +85,7 @@ class ZoneConfig(BaseModel):
if explicit:
self.coordinates = ",".join(
[
f'{round(int(p.split(",")[0]) / frame_shape[1], 3)},{round(int(p.split(",")[1]) / frame_shape[0], 3)}'
f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
for p in coordinates
]
)


@@ -0,0 +1,74 @@
from typing import Dict, List, Optional
from pydantic import Field
from .base import FrigateBaseModel
__all__ = [
"FaceRecognitionConfig",
"SemanticSearchConfig",
"LicensePlateRecognitionConfig",
]
class BirdClassificationConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable bird classification.")
threshold: float = Field(
default=0.9,
title="Minimum classification score required to be considered a match.",
gt=0.0,
le=1.0,
)
class ClassificationConfig(FrigateBaseModel):
bird: BirdClassificationConfig = Field(
default_factory=BirdClassificationConfig, title="Bird classification config."
)
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
min_score: float = Field(
title="Minimum face distance score required to save the attempt.",
default=0.8,
gt=0.0,
le=1.0,
)
threshold: float = Field(
default=0.9,
title="Minimum face distance score required to be considered a match.",
gt=0.0,
le=1.0,
)
min_area: int = Field(
default=500, title="Min area of face box to consider running face recognition."
)
save_attempts: bool = Field(
default=True, title="Save images of face detections for training."
)
class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
threshold: float = Field(
default=0.9,
title="License plate confidence score required to be added to the object as a sub label.",
)
min_area: int = Field(
default=500,
title="Min area of license plate to consider running license plate recognition.",
)
known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, title="Known plates to track."
)


@@ -51,13 +51,18 @@ from .camera.review import ReviewConfig
from .camera.snapshots import SnapshotsConfig
from .camera.timestamp import TimestampStyleConfig
from .camera_group import CameraGroupConfig
from .classification import (
ClassificationConfig,
FaceRecognitionConfig,
LicensePlateRecognitionConfig,
SemanticSearchConfig,
)
from .database import DatabaseConfig
from .env import EnvVars
from .logger import LoggerConfig
from .mqtt import MqttConfig
from .notification import NotificationConfig
from .proxy import ProxyConfig
from .semantic_search import SemanticSearchConfig
from .telemetry import TelemetryConfig
from .tls import TlsConfig
from .ui import UIConfig
@@ -159,6 +164,16 @@ class RestreamConfig(BaseModel):
model_config = ConfigDict(extra="allow")
def verify_semantic_search_dependent_configs(config: FrigateConfig) -> None:
"""Verify that semantic search is enabled if required features are enabled."""
if not config.semantic_search.enabled:
if config.genai.enabled:
raise ValueError("Genai requires semantic search to be enabled.")
if config.face_recognition.enabled:
            raise ValueError("Face recognition requires semantic search to be enabled.")
def verify_config_roles(camera_config: CameraConfig) -> None:
"""Verify that roles are setup in the config correctly."""
assigned_roles = list(
@@ -317,9 +332,19 @@ class FrigateConfig(FrigateBaseModel):
default_factory=TelemetryConfig, title="Telemetry configuration."
)
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
@@ -437,13 +462,12 @@ class FrigateConfig(FrigateBaseModel):
camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
for input in camera_config.ffmpeg.inputs:
need_record_fourcc = False and "record" in input.roles
need_detect_dimensions = "detect" in input.roles and (
camera_config.detect.height is None
or camera_config.detect.width is None
)
if need_detect_dimensions or need_record_fourcc:
if need_detect_dimensions:
stream_info = {"width": 0, "height": 0, "fourcc": None}
try:
stream_info = stream_info_retriever.get_stream_info(
@@ -467,14 +491,6 @@ class FrigateConfig(FrigateBaseModel):
else DEFAULT_DETECT_DIMENSIONS["height"]
)
if need_record_fourcc:
# Apple only supports HEVC if it is hvc1 (vs. hev1)
camera_config.ffmpeg.output_args._force_record_hvc1 = (
stream_info["fourcc"] == "hevc"
if stream_info.get("hevc")
else False
)
# Warn if detect fps > 10
if camera_config.detect.fps > 10:
logger.warning(
@@ -578,13 +594,8 @@ class FrigateConfig(FrigateBaseModel):
verify_autotrack_zones(camera_config)
verify_motion_and_detect(camera_config)
# get list of unique enabled labels for tracking
enabled_labels = set(self.objects.track)
for camera in self.cameras.values():
enabled_labels.update(camera.objects.track)
self.model.create_colormap(sorted(enabled_labels))
self.objects.parse_all_objects(self.cameras)
self.model.create_colormap(sorted(self.objects.all_objects))
self.model.check_and_load_plus_model(self.plus_api)
for key, detector in self.detectors.items():
@@ -594,37 +605,30 @@ class FrigateConfig(FrigateBaseModel):
if isinstance(detector, dict)
else detector.model_dump(warnings="none")
)
detector_config: DetectorConfig = adapter.validate_python(model_dict)
if detector_config.model is None:
detector_config.model = self.model.model_copy()
else:
path = detector_config.model.path
detector_config.model = self.model.model_copy()
detector_config.model.path = path
detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
if "path" not in model_dict or len(model_dict.keys()) > 1:
logger.warning(
"Customizing more than a detector model path is unsupported."
)
# users should not set model themselves
if detector_config.model:
detector_config.model = None
merged_model = deep_merge(
detector_config.model.model_dump(exclude_unset=True, warnings="none"),
self.model.model_dump(exclude_unset=True, warnings="none"),
)
model_config = self.model.model_dump(exclude_unset=True, warnings="none")
if "path" not in merged_model:
if detector_config.model_path:
model_config["path"] = detector_config.model_path
if "path" not in model_config:
if detector_config.type == "cpu":
merged_model["path"] = "/cpu_model.tflite"
model_config["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
merged_model["path"] = "/edgetpu_model.tflite"
model_config["path"] = "/edgetpu_model.tflite"
detector_config.model = ModelConfig.model_validate(merged_model)
detector_config.model.check_and_load_plus_model(
self.plus_api, detector_config.type
)
detector_config.model.compute_model_hash()
model = ModelConfig.model_validate(model_config)
model.check_and_load_plus_model(self.plus_api, detector_config.type)
model.compute_model_hash()
detector_config.model = model
self.detectors[key] = detector_config
verify_semantic_search_dependent_configs(self)
return self
@field_validator("cameras")


@@ -29,6 +29,7 @@ class LoggerConfig(FrigateBaseModel):
logging.getLogger().setLevel(self.default.value.upper())
log_levels = {
"httpx": LogLevel.error,
"werkzeug": LogLevel.error,
"ws4py": LogLevel.error,
**self.logs,


@@ -1,17 +0,0 @@
from typing import Optional
from pydantic import Field
from .base import FrigateBaseModel
__all__ = ["SemanticSearchConfig"]
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)


@@ -11,6 +11,9 @@ class StatsConfig(FrigateBaseModel):
network_bandwidth: bool = Field(
default=False, title="Enable network bandwidth for ffmpeg processes."
)
sriov: bool = Field(
default=False, title="Treat device as SR-IOV to support GPU stats."
)
class TelemetryConfig(FrigateBaseModel):


@@ -5,8 +5,9 @@ DEFAULT_DB_PATH = f"{CONFIG_DIR}/frigate.db"
MODEL_CACHE_DIR = f"{CONFIG_DIR}/model_cache"
BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
RECORD_DIR = f"{BASE_DIR}/recordings"
EXPORT_DIR = f"{BASE_DIR}/exports"
FACE_DIR = f"{CLIPS_DIR}/faces"
RECORD_DIR = f"{BASE_DIR}/recordings"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
@@ -64,6 +65,7 @@ INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]
# Regex constants


@@ -0,0 +1,43 @@
"""Local or remote processors to handle post processing."""
import logging
from abc import ABC, abstractmethod
from frigate.config import FrigateConfig
from ..types import DataProcessorMetrics, PostProcessDataEnum
logger = logging.getLogger(__name__)
class PostProcessorApi(ABC):
@abstractmethod
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
self.config = config
self.metrics = metrics
pass
@abstractmethod
def process_data(
self, data: dict[str, any], data_type: PostProcessDataEnum
) -> None:
"""Processes the data of data type.
Args:
data (dict): containing data about the input.
data_type (enum): Describing the data that is being processed.
Returns:
None.
"""
pass
@abstractmethod
def handle_request(self, request_data: dict[str, any]) -> dict[str, any] | None:
"""Handle metadata requests.
Args:
request_data (dict): containing data about requested change to process.
Returns:
None if request was not handled, otherwise return response.
"""
pass


@@ -0,0 +1,57 @@
"""Local only processors for handling real time object processing."""
import logging
from abc import ABC, abstractmethod
import numpy as np
from frigate.config import FrigateConfig
from ..types import DataProcessorMetrics
logger = logging.getLogger(__name__)
class RealTimeProcessorApi(ABC):
@abstractmethod
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
self.config = config
self.metrics = metrics
pass
@abstractmethod
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
"""Processes the frame with object data.
Args:
obj_data (dict): containing data about focused object in frame.
frame (ndarray): full yuv frame.
Returns:
None.
"""
pass
@abstractmethod
def handle_request(
self, topic: str, request_data: dict[str, any]
) -> dict[str, any] | None:
"""Handle metadata requests.
Args:
topic (str): topic that dictates what work is requested.
request_data (dict): containing data about requested change to process.
Returns:
None if request was not handled, otherwise return response.
"""
pass
@abstractmethod
def expire_object(self, object_id: str) -> None:
"""Handle objects that are no longer detected.
Args:
object_id (str): id of object that is no longer detected.
Returns:
None.
"""
pass


@@ -0,0 +1,154 @@
"""Handle processing images to classify birds."""
import logging
import os
import cv2
import numpy as np
import requests
from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.object import calculate_region
from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
class BirdProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
super().__init__(config, metrics)
self.interpreter: Interpreter = None
self.tensor_input_details: dict[str, any] = None
self.tensor_output_details: dict[str, any] = None
self.detected_birds: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
download_path = os.path.join(MODEL_CACHE_DIR, "bird")
self.model_files = {
"bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
"birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt",
}
if not all(
os.path.exists(os.path.join(download_path, n))
for n in self.model_files.keys()
):
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
self.downloader = ModelDownloader(
model_name="bird",
download_path=download_path,
file_names=self.model_files.keys(),
download_func=self.__download_models,
complete_func=self.__build_detector,
)
self.downloader.ensure_model_files()
else:
self.__build_detector()
def __download_models(self, path: str) -> None:
try:
file_name = os.path.basename(path)
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
ModelDownloader.download_from_url(self.model_files[file_name], path)
except Exception as e:
logger.error(f"Failed to download {path}: {e}")
def __build_detector(self) -> None:
self.interpreter = Interpreter(
model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
num_threads=2,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
i = 0
with open(os.path.join(MODEL_CACHE_DIR, "bird/birdmap.txt")) as f:
line = f.readline()
while line:
start = line.find("(")
end = line.find(")")
self.labelmap[i] = line[start + 1 : end]
i += 1
line = f.readline()
def process_frame(self, obj_data, frame):
if obj_data["label"] != "bird":
return
x, y, x2, y2 = calculate_region(
frame.shape,
obj_data["box"][0],
obj_data["box"][1],
obj_data["box"][2],
obj_data["box"][3],
224,
1.0,
)
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
input = rgb[
y:y2,
x:x2,
]
cv2.imwrite("/media/frigate/test_class.png", input)
input = np.expand_dims(input, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke()
res: np.ndarray = self.interpreter.get_tensor(
self.tensor_output_details[0]["index"]
)[0]
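        # normalize the raw output scores into a probability distribution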
probs = res / res.sum(axis=0)
best_id = np.argmax(probs)
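        # index 964 is the background ("no bird") entry in this labelmap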
if best_id == 964:
logger.debug("No bird classification was detected.")
return
score = round(probs[best_id], 2)
if score < self.config.classification.bird.threshold:
logger.debug(f"Score {score} is not above required threshold")
return
previous_score = self.detected_birds.get(obj_data["id"], 0.0)
if score <= previous_score:
logger.debug(f"Score {score} is worse than previous score {previous_score}")
return
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{obj_data['id']}/sub_label",
json={
"camera": obj_data.get("camera"),
"subLabel": self.labelmap[best_id],
"subLabelScore": score,
},
)
if resp.status_code == 200:
self.detected_birds[obj_data["id"]] = score
def handle_request(self, topic, request_data):
return None
def expire_object(self, object_id):
if object_id in self.detected_birds:
self.detected_birds.pop(object_id)


@@ -0,0 +1,406 @@
"""Handle processing images for face detection and recognition."""
import base64
import datetime
import logging
import os
import random
import string
from typing import Optional
import cv2
import numpy as np
import requests
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.image import area
from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi
logger = logging.getLogger(__name__)
MIN_MATCHING_FACES = 2
class FaceProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
super().__init__(config, metrics)
self.face_config = config.face_recognition
self.face_detector: cv2.FaceDetectorYN = None
self.landmark_detector: cv2.face.FacemarkLBF = None
self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
self.requires_face_detection = "face" not in self.config.objects.all_objects
self.detected_faces: dict[str, float] = {}
download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
self.model_files = {
"facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
"landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
}
if not all(
os.path.exists(os.path.join(download_path, n))
for n in self.model_files.keys()
):
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
self.downloader = ModelDownloader(
model_name="facedet",
download_path=download_path,
file_names=self.model_files.keys(),
download_func=self.__download_models,
complete_func=self.__build_detector,
)
self.downloader.ensure_model_files()
else:
self.__build_detector()
self.label_map: dict[int, str] = {}
self.__build_classifier()
def __download_models(self, path: str) -> None:
try:
file_name = os.path.basename(path)
# conditionally import ModelDownloader
from frigate.util.downloader import ModelDownloader
ModelDownloader.download_from_url(self.model_files[file_name], path)
except Exception as e:
logger.error(f"Failed to download {path}: {e}")
def __build_detector(self) -> None:
self.face_detector = cv2.FaceDetectorYN.create(
"/config/model_cache/facedet/facedet.onnx",
config="",
input_size=(320, 320),
score_threshold=0.8,
nms_threshold=0.3,
)
self.landmark_detector = cv2.face.createFacemarkLBF()
self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
def __build_classifier(self) -> None:
if not self.landmark_detector:
return None
labels = []
faces = []
dir = "/media/frigate/clips/faces"
for idx, name in enumerate(os.listdir(dir)):
if name == "train":
continue
face_folder = os.path.join(dir, name)
if not os.path.isdir(face_folder):
continue
self.label_map[idx] = name
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
if img is None:
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
faces.append(img)
labels.append(idx)
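        # min_score (0-1) is mapped onto LBPH's distance threshold; matches with
        # a larger distance than this are rejected at predict() time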
self.recognizer: cv2.face.LBPHFaceRecognizer = (
cv2.face.LBPHFaceRecognizer_create(
radius=2, threshold=(1 - self.face_config.min_score) * 1000
)
)
self.recognizer.train(faces, np.array(labels))
def __align_face(
self,
image: np.ndarray,
output_width: int,
output_height: int,
) -> np.ndarray:
_, lands = self.landmark_detector.fit(
image, np.array([(0, 0, image.shape[1], image.shape[0])])
)
landmarks: np.ndarray = lands[0][0]
# get landmarks for eyes
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]
# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
desiredRightEyeX = 1.0 - 0.35
# determine the scale of the new resulting image by taking
# the ratio of the distance between eyes in the *current*
# image to the ratio of distance between eyes in the
# *desired* image
dist = np.sqrt((dX**2) + (dY**2))
desiredDist = desiredRightEyeX - 0.35
desiredDist *= output_width
scale = desiredDist / dist
# compute center (x, y)-coordinates (i.e., the median point)
# between the two eyes in the input image
# grab the rotation matrix for rotating and scaling the face
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
# update the translation component of the matrix
tX = output_width * 0.5
tY = output_height * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]
# apply the affine transformation
return cv2.warpAffine(
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
)
def __clear_classifier(self) -> None:
self.face_recognizer = None
self.label_map = {}
    def __detect_face(self, input: np.ndarray) -> Optional[tuple[int, int, int, int]]:
        """Detect faces in input image and return the largest bounding box."""
        if not self.face_detector:
            return None

        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
        faces = self.face_detector.detect(input)

        if faces is None or faces[1] is None:
            return None

        face = None

        for _, potential_face in enumerate(faces[1]):
            raw_bbox = potential_face[0:4].astype(np.uint16)
            x: int = max(raw_bbox[0], 0)
            y: int = max(raw_bbox[1], 0)
            w: int = raw_bbox[2]
            h: int = raw_bbox[3]
            bbox = (x, y, x + w, y + h)

            if face is None or area(bbox) > area(face):
                face = bbox

        return face
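
The area helper used above is not defined in this diff; a minimal sketch of what it is assumed to do, given that boxes are built as (x1, y1, x2, y2) tuples here:

def area(box: tuple[int, int, int, int]) -> int:
    # hypothetical stand-in for the helper used above; the real one
    # lives elsewhere in the frigate codebase
    return (box[2] - box[0]) * (box[3] - box[1])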
    def __classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
        if not self.landmark_detector:
            return None

        if not self.label_map:
            self.__build_classifier()

        img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        img = self.__align_face(img, img.shape[1], img.shape[0])
        index, distance = self.recognizer.predict(img)

        if index == -1:
            return None

        score = 1.0 - (distance / 1000)
        return self.label_map[index], round(score, 2)
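
Note how the two halves of the classifier stay consistent: training sets the LBPH rejection threshold to (1 - min_score) * 1000, and prediction maps the returned distance back with score = 1 - distance / 1000, so only matches at or above min_score survive. A quick worked check (illustrative numbers):

min_score = 0.8
cutoff = (1 - min_score) * 1000   # 200.0; LBPH returns index -1 above this
distance = 150.0                  # hypothetical match distance
score = 1.0 - (distance / 1000)   # 0.85, which clears min_score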
    def __update_metrics(self, duration: float) -> None:
        self.metrics.face_rec_fps.value = (
            self.metrics.face_rec_fps.value * 9 + duration
        ) / 10
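
Despite the _fps suffix on the metric, the value stored here is a smoothed per-face processing duration: each new measurement contributes one tenth and the running value keeps nine tenths, an exponentially weighted moving average. A minimal illustration with made-up durations:

fps_value = 0.01  # initial value, matching DataProcessorMetrics later in this diff
for duration in (0.05, 0.06, 0.04):
    fps_value = (fps_value * 9 + duration) / 10
# after three samples the value has drifted toward the recent average duration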
    def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
        """Look for faces in image."""
        start = datetime.datetime.now().timestamp()
        id = obj_data["id"]

        # don't run for non person objects
        if obj_data.get("label") != "person":
            logger.debug("Not processing face for non person object.")
            return

        # don't overwrite sub label for objects that have a sub label
        # that is not a face
        if obj_data.get("sub_label") and id not in self.detected_faces:
            logger.debug(
                f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
            )
            return

        face: Optional[dict[str, any]] = None

        if self.requires_face_detection:
            logger.debug("Running manual face detection.")
            person_box = obj_data.get("box")

            if not person_box:
                return

            rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
            left, top, right, bottom = person_box
            person = rgb[top:bottom, left:right]
            face_box = self.__detect_face(person)

            if not face_box:
                logger.debug("Detected no faces for person object.")
                return

            face_frame = person[
                max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
                max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
            ]
            face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
        else:
            # don't run for object without attributes
            if not obj_data.get("current_attributes"):
                logger.debug("No attributes to parse.")
                return

            attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
            for attr in attributes:
                if attr.get("label") != "face":
                    continue

                if face is None or attr.get("score", 0.0) > face.get("score", 0.0):
                    face = attr

            # no faces detected in this frame
            if not face:
                return

            face_box = face.get("box")

            # check that face is valid
            if not face_box or area(face_box) < self.config.face_recognition.min_area:
                logger.debug(f"Invalid face box {face}")
                return

            face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
            face_frame = face_frame[
                max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
                max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
            ]

        res = self.__classify_face(face_frame)

        if not res:
            return

        sub_label, score = res

        # calculate the overall face score as probability * face area;
        # this helps reduce false positives from small side-angle faces,
        # since a large front-on face may score slightly lower in
        # probability but is more likely to be accurate due to its area
        face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2)

        logger.debug(
            f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
        )

        if self.config.face_recognition.save_attempts:
            # write face to library
            folder = os.path.join(FACE_DIR, "train")
            file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
            os.makedirs(folder, exist_ok=True)
            cv2.imwrite(file, face_frame)

        if score < self.config.face_recognition.threshold:
            logger.debug(
                f"Recognized face score {score} is less than threshold {self.config.face_recognition.threshold}"
            )
            self.__update_metrics(datetime.datetime.now().timestamp() - start)
            return

        if id in self.detected_faces and face_score <= self.detected_faces[id]:
            logger.debug(
                f"Recognized face score {score} with overall score {face_score} is not greater than the previous overall face score ({self.detected_faces.get(id)})."
            )
            self.__update_metrics(datetime.datetime.now().timestamp() - start)
            return

        resp = requests.post(
            f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
            json={
                "camera": obj_data.get("camera"),
                "subLabel": sub_label,
                "subLabelScore": score,
            },
        )

        if resp.status_code == 200:
            self.detected_faces[id] = face_score

        self.__update_metrics(datetime.datetime.now().timestamp() - start)
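
To make the probability * area weighting above concrete (illustrative numbers only):

small_face = round(0.90 * 60 * 60, 2)     # 3240.0 for a 60x60 side-angle crop
large_face = round(0.85 * 120 * 120, 2)   # 12240.0 for a 120x120 front-on crop
assert large_face > small_face  # the bigger face wins despite its lower probability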
    def handle_request(self, topic, request_data) -> dict[str, any] | None:
        if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
            self.__clear_classifier()
        elif topic == EmbeddingsRequestEnum.register_face.value:
            rand_id = "".join(
                random.choices(string.ascii_lowercase + string.digits, k=6)
            )
            label = request_data["face_name"]
            id = f"{label}-{rand_id}"

            if request_data.get("cropped"):
                thumbnail = request_data["image"]
            else:
                img = cv2.imdecode(
                    np.frombuffer(
                        base64.b64decode(request_data["image"]), dtype=np.uint8
                    ),
                    cv2.IMREAD_COLOR,
                )
                face_box = self.__detect_face(img)

                if not face_box:
                    return {
                        "message": "No face was detected.",
                        "success": False,
                    }

                face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
                _, thumbnail = cv2.imencode(
                    ".webp", face, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
                )

            # write face to library
            folder = os.path.join(FACE_DIR, label)
            file = os.path.join(folder, f"{id}.webp")
            os.makedirs(folder, exist_ok=True)

            # save face image
            with open(file, "wb") as output:
                output.write(thumbnail.tobytes())

            self.__clear_classifier()

            return {
                "message": "Successfully registered face.",
                "success": True,
            }
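
A minimal sketch of driving the registration path directly (the image path and the processor instance are hypothetical; in Frigate this request normally arrives via the embeddings requestor shown later in this diff):

import base64

with open("face.jpg", "rb") as f:  # hypothetical input image
    payload = {
        "face_name": "alice",
        "image": base64.b64encode(f.read()).decode("ASCII"),
    }

# processor is assumed to be an instance of the class above
result = processor.handle_request(
    EmbeddingsRequestEnum.register_face.value, payload
)
print(result)  # on success: {"message": "Successfully registered face.", "success": True}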
    def expire_object(self, object_id: str):
        if object_id in self.detected_faces:
            self.detected_faces.pop(object_id)

View File

@@ -0,0 +1,24 @@
+"""Embeddings types."""
+
+import multiprocessing as mp
+from enum import Enum
+from multiprocessing.sharedctypes import Synchronized
+
+
+class DataProcessorMetrics:
+    image_embeddings_fps: Synchronized
+    text_embeddings_sps: Synchronized
+    face_rec_fps: Synchronized
+    alpr_pps: Synchronized
+
+    def __init__(self):
+        self.image_embeddings_fps = mp.Value("d", 0.01)
+        self.text_embeddings_sps = mp.Value("d", 0.01)
+        self.face_rec_fps = mp.Value("d", 0.01)
+        self.alpr_pps = mp.Value("d", 0.01)
+
+
+class PostProcessDataEnum(str, Enum):
+    recording = "recording"
+    review = "review"
+    tracked_object = "tracked_object"
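
Because each field is a multiprocessing Value, a single DataProcessorMetrics instance can be handed to child processes and updated in place; a minimal sketch (the worker and its 50 ms sample are made up):

import multiprocessing as mp

def worker(metrics: "DataProcessorMetrics") -> None:
    # fold one made-up 0.05 s measurement into the shared value,
    # mirroring the smoothing used by __update_metrics above
    metrics.face_rec_fps.value = (metrics.face_rec_fps.value * 9 + 0.05) / 10

if __name__ == "__main__":
    metrics = DataProcessorMetrics()
    p = mp.Process(target=worker, args=(metrics,))
    p.start()
    p.join()
    print(round(metrics.face_rec_fps.value, 4))  # reflects the worker's update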

View File

@@ -194,6 +194,9 @@ class BaseDetectorConfig(BaseModel):
     model: Optional[ModelConfig] = Field(
         default=None, title="Detector specific model configuration."
     )
+    model_path: Optional[str] = Field(
+        default=None, title="Detector specific model path."
+    )
     model_config = ConfigDict(
         extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
     )
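
With this field on the base config, a detector entry can point straight at a model file from the config; a hypothetical YAML snippet (detector name, type, and path are illustrative, and whether a given detector honors the field depends on its implementation):

detectors:
  example:
    type: cpu
    model_path: /config/model_cache/example_model.tflite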

View File

@@ -108,7 +108,7 @@ class Rknn(DetectionApi):
        model_props["model_type"] = model_type

        if model_matched:
-            model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
+            model_props["filename"] = model_path + f"-{soc}-v2.3.0-1.rknn"

            model_props["path"] = model_cache_dir + model_props["filename"]
@@ -129,7 +129,7 @@ class Rknn(DetectionApi):
            os.mkdir(model_cache_dir)

        urllib.request.urlretrieve(
-            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
+            f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.0/{filename}",
            model_cache_dir + filename,
        )

View File

@@ -219,19 +219,19 @@ class TensorRtDetector(DetectionApi):
     ]

     def __init__(self, detector_config: TensorRTDetectorConfig):
-        assert (
-            TRT_SUPPORT
-        ), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
+        assert TRT_SUPPORT, (
+            f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
+        )

         (cuda_err,) = cuda.cuInit(0)
-        assert (
-            cuda_err == cuda.CUresult.CUDA_SUCCESS
-        ), f"Failed to initialize cuda {cuda_err}"
+        assert cuda_err == cuda.CUresult.CUDA_SUCCESS, (
+            f"Failed to initialize cuda {cuda_err}"
+        )
         err, dev_count = cuda.cuDeviceGetCount()
         logger.debug(f"Num Available Devices: {dev_count}")
-        assert (
-            detector_config.device < dev_count
-        ), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
+        assert detector_config.device < dev_count, (
+            f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
+        )
         err, self.cu_ctx = cuda.cuCtxCreate(
             cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
         )

View File

@@ -1,5 +1,6 @@
 """SQLite-vec embeddings database."""

+import base64
 import json
 import logging
 import multiprocessing as mp
@@ -13,7 +14,8 @@ from setproctitle import setproctitle
 from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
 from frigate.config import FrigateConfig
-from frigate.const import CONFIG_DIR
+from frigate.const import CONFIG_DIR, FACE_DIR
+from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
 from frigate.util.builtin import serialize
@@ -25,7 +27,7 @@ from .util import ZScoreNormalization
 logger = logging.getLogger(__name__)


-def manage_embeddings(config: FrigateConfig) -> None:
+def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
     # Only initialize embeddings if semantic search is enabled
     if not config.semantic_search.enabled:
         return
@@ -59,6 +61,7 @@ def manage_embeddings(config: FrigateConfig) -> None:
     maintainer = EmbeddingMaintainer(
         db,
         config,
+        metrics,
         stop_event,
     )
     maintainer.start()
@@ -189,6 +192,38 @@ class EmbeddingsContext:
         return results

+    def register_face(self, face_name: str, image_data: bytes) -> dict[str, any]:
+        return self.requestor.send_data(
+            EmbeddingsRequestEnum.register_face.value,
+            {
+                "face_name": face_name,
+                "image": base64.b64encode(image_data).decode("ASCII"),
+            },
+        )
+
+    def get_face_ids(self, name: str) -> list[str]:
+        sql_query = f"""
+            SELECT
+                id
+            FROM vec_descriptions
+            WHERE id LIKE '%{name}%'
+        """
+
+        return self.db.execute_sql(sql_query).fetchall()
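
One caveat worth flagging: get_face_ids interpolates name directly into the SQL string, which is fragile if a face name ever contains a quote. A hedged sketch of a parameterized equivalent, assuming execute_sql accepts a params tuple as in peewee:

def get_face_ids(self, name: str) -> list[str]:
    sql_query = """
        SELECT id
        FROM vec_descriptions
        WHERE id LIKE ?
    """
    # bind the pattern as a parameter instead of formatting it in
    return self.db.execute_sql(sql_query, (f"%{name}%",)).fetchall()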
+    def clear_face_classifier(self) -> None:
+        self.requestor.send_data(
+            EmbeddingsRequestEnum.clear_face_classifier.value, None
+        )
+
+    def delete_face_ids(self, face: str, ids: list[str]) -> None:
+        folder = os.path.join(FACE_DIR, face)
+
+        for id in ids:
+            file_path = os.path.join(folder, id)
+
+            if os.path.isfile(file_path):
+                os.unlink(file_path)
+
     def update_description(self, event_id: str, description: str) -> None:
         self.requestor.send_data(
             EmbeddingsRequestEnum.embed_description.value,

Some files were not shown because too many files have changed in this diff.