forked from GitHub/frigate
Compare commits — 95 commits (authors include dependabot)
Commit SHA1s: 1b4bc9f6fa, 61a4a4bc2f, 176af55e8c, 1a51ce712c, 535bf6e4b9, 515f06ba6c, 6c43e5dba9, d498fabe72, 27e71eb142, 7c63cb5bca, ddf3a687a3, 4515eb4637, efd1194307, 5e0d8fe4c7, e44a9e8921, ff9e1da1de, 1ed8642010, 38ff46e45c, 2362d0e838, 90d7fc6bc5, bcae0cf441, edababa88e, 350abda21a, 1c24f0054a, f7eaace7ae, 8573016bef, 6bf2708c0e, 36d7eb7caa, 4fc8d33d31, 2f69f5afe6, 9bcb928715, e3edcf057c, 06ccf7e9e9, e4ea35e626, 329bece28d, 0c86c77d42, fc145016ea, c17524bc3c, d5acd11164, 2a66923524, 088a0fb4a5, 4f10f82580, 5aee70ac7a, 5ff476c6f9, 641f1244dd, a1fd29b34b, 90c1cc3e3b, ba49054cd7, 61854f1d6a, 1f9ba1d625, 644ea7be4a, 87ab4e7c9b, d84e3cacca, b4acf4f341, 62657ad05a, f3784505e0, 863f51363a, 22ee6bb137, 3972642ba0, e016bd6900, d2588d9de4, 07d1692f2b, 8db9824842, c8521554c8, ceb7aa8b36, cae11cbb86, 03ff3e639f, f5dbcd5465, f143fceceb, 8be139d4d1, 17901fcfef, d6b16a7747, 17fa830851, 149339a8d9, 764cca5a70, 18a6aa1824, 5c00ed352c, 7e9a7ad49c, fe2fec81ac, 055f0dfc22, ddf9163c47, e80322dab7, 7626dd239a, 9afa1354da, 58a471e466, e66f47bdf6, 21a50cc452, 5239790835, 0acbd3d5e8, e3da5ef2d5, ecaba82c9d, 921c9de241, 6a0b5c3a3f, a8dcc87019, 4ec136cab0
@@ -1,168 +1,303 @@
-rtmp
-edgetpu
-labelmap
-rockchip
-jetson
-rocm
-vaapi
-CUDA
-hwaccel
-RTSP
-Hikvision
-Dahua
-Amcrest
-Reolink
-Loryta
-Beelink
-Celeron
-vaapi
-blakeblackshear
-workdir
-onvif
-autotracking
-openvino
-tflite
-deepstack
-codeproject
-udev
-tailscale
-restream
-restreaming
-webrtc
-ssdlite
-mobilenet
-mosquitto
-datasheet
-Jellyfin
-Radeon
-libva
-Ubiquiti
-Unifi
-Tapo
-Annke
-autotracker
-autotracked
-variations
-ONVIF
-traefik
-devcontainer
-rootfs
-ffprobe
-autotrack
-logpipe
-imread
-imwrite
-imencode
-imutils
-thresholded
-timelapse
-ultrafast
-sleeptime
-radeontop
-vainfo
-tmpfs
-homography
-websockets
-LIBAVFORMAT
-NTSC
-onnxruntime
-fourcc
-radeonsi
-paho
-imagestream
-jsonify
-cgroups
-sysconf
-memlimit
-gpuload
-nvml
-setproctitle
-psutil
-Kalman
-frontdoor
-namedtuples
-zeep
-fflags
-probesize
-wallclock
-rknn
-socs
-pydantic
-shms
-imdecode
-colormap
-webui
-mse
-jsmpeg
-unreviewed
-Chromecast
-Swipeable
-flac
-scroller
-cmdline
-toggleable
-bottombar
-opencv
-apexcharts
-buildx
-mqtt
-rawvideo
-defragment
-Norfair
-subclassing
-yolo
-tensorrt
-blackshear
-stylelint
-HACS
-homeassistant
-hass
-castable
-mobiledet
-framebuffer
-mjpeg
-substream
-codeowner
-noninteractive
-restreamed
-mountpoint
-fstype
-OWASP
-iotop
-letsencrypt
-fullchain
-lsusb
-iostat
-usermod
-balena
-passwordless
-debconf
-dpkg
-poweroff
-surveillance
-qnap
-homekit
-colorspace
-quantisation
-skylake
-Cuvid
-foscam
-onnx
-numpy
-protobuf
+aarch
+absdiff
+airockchip
+Alloc
+Amcrest
+amdgpu
+chipset
+referer
+mpegts
+webp
+analyzeduration
+Annke
+apexcharts
+arange
+argmax
+argmin
+argpartition
+ascontiguousarray
+authelia
+authentik
+unichip
+rebranded
+udevadm
+autodetected
+automations
+unraid
+hideable
+autotrack
+autotracked
+autotracker
+autotracking
+balena
+Beelink
+BGRA
+BHWC
+blackshear
+blakeblackshear
+bottombar
+buildx
+castable
+cdist
+Celeron
+cgroups
+chipset
+chromadb
+Chromecast
+cmdline
+codeowner
+CODEOWNERS
+codeproject
+colormap
+colorspace
+comms
+ctypeslib
+CUDA
+Cuvid
+Dahua
+datasheet
+debconf
+deci
+deepstack
+defragment
+devcontainer
+DEVICEMAP
+discardcorrupt
+dpkg
+dsize
+dtype
+ECONNRESET
+edgetpu
+faststart
+fflags
+ffprobe
+fillna
+flac
+foscam
+fourcc
+framebuffer
+fregate
+frégate
+fromarray
+frombuffer
+frontdoor
+fstype
+fullchain
+fullscreen
+genai
+generativeai
+genpts
+getpid
+gpuload
+HACS
+Hailo
+hass
+hconcat
+healthcheck
+keepalive
+hideable
+Hikvision
+homeassistant
+homekit
+homography
+hsize
+hstack
+httpx
+hwaccel
+hwdownload
+hwmap
+hwupload
+iloc
+imagestream
+imdecode
+imencode
+imread
+imutils
+imwrite
+interp
+iostat
+iotop
+itemsize
+Jellyfin
+jetson
+jetsons
+joserfc
+jsmpeg
+jsonify
+Kalman
+keepalive
+keepdims
+labelmap
+letsencrypt
+levelname
+LIBAVFORMAT
+libedgetpu
+libnvinfer
+libva
+libwebp
+libx
+libyolo
+linalg
+localzone
+logpipe
+Loryta
+lstsq
+lsusb
+markupsafe
+maxsplit
+MEMHOSTALLOC
+memlimit
+meshgrid
+metadatas
+migraphx
+minilm
+mjpeg
+mkfifo
+mobiledet
+mobilenet
+modelpath
+mosquitto
+mountpoint
+movflags
+mpegts
+mqtt
+mse
+msenc
+namedtuples
+nbytes
+nchw
+ndarray
+ndimage
+nethogs
+newaxis
+nhwc
+NOBLOCK
+nobuffer
+nokey
+NONBLOCK
+noninteractive
+noprint
+Norfair
+nptype
+NTSC
+numpy
+nvenc
+nvhost
+nvml
+nvmpi
+ollama
+onnx
+onnxruntime
+onvif
+ONVIF
+openai
+opencv
+openvino
+OWASP
+paho
+passwordless
+popleft
+posthog
+postprocess
+poweroff
+preexec
+probesize
+protobuf
+psutil
+pubkey
+putenv
+pycache
+pydantic
+pyobj
+pysqlite
+pytz
+pywebpush
+qnap
+quantisation
+Radeon
+radeonsi
+radeontop
+rawvideo
+rcond
+RDONLY
+rebranded
+referer
+Reolink
+restream
+restreamed
+restreaming
+rkmpp
+rknn
+rkrga
+rockchip
+rocm
+rocminfo
+rootfs
+rtmp
+RTSP
+ruamel
+scroller
+setproctitle
+setpts
+shms
+SIGUSR
+skylake
+sleeptime
+SNDMORE
+socs
+sqliteq
+ssdlite
+statm
+stimeout
+stylelint
+subclassing
+substream
+superfast
+surveillance
+svscan
+Swipeable
+sysconf
+tailscale
+Tapo
+tensorrt
+tflite
+thresholded
+timelapse
+tmpfs
+tobytes
+toggleable
+traefik
+tzlocal
+Ubiquiti
+udev
+udevadm
+ultrafast
+unichip
+unidecode
+Unifi
+unixepoch
+unraid
+unreviewed
+userdata
+usermod
+vaapi
+vainfo
+variations
+vconcat
+vitb
+vstream
+vsync
+wallclock
+webp
+webpush
+webrtc
+websockets
+webui
+werkzeug
+workdir
+WRONLY
+wsgirefserver
+wsgiutils
+wsize
+xaddr
+xmaxs
+xmins
+XPUB
+XSUB
+ymaxs
+ymins
+yolo
+yolonas
+yolox
+zeep
+zerolatency
@@ -17,7 +17,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate
 # When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
 # s6 service file. For dev, where frigate is started from an interactive
 # shell, we define it in .bashrc instead.
-echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc

 make version
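For context, only the major version of libavformat reported by the chosen ffmpeg binary is stored; a minimal sketch of what the extraction does (the resulting number depends on the build — for example, libavformat is 59.x in ffmpeg 5.x and 60.x in ffmpeg 6.x):

```bash
# grep -P enables Perl regexes: \W+ matches the separator after "libavformat"
# and \K drops everything matched so far, so only the major-version digits print.
ffmpeg -version | grep -Po "libavformat\W+\K\d+"
```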
.github/workflows/ci.yml (vendored, 63 lines changed)
@@ -179,57 +179,18 @@ jobs:
             h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
-      #- name: AMD/ROCm general build
-      #  env:
-      #    AMDGPU: gfx
-      #    HSA_OVERRIDE: 0
-      #  uses: docker/bake-action@v3
-      #  with:
-      #    push: true
-      #    targets: rocm
-      #    files: docker/rocm/rocm.hcl
-      #    set: |
-      #      rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
-      #      *.cache-from=type=gha
-      #- name: AMD/ROCm gfx900
-      #  env:
-      #    AMDGPU: gfx900
-      #    HSA_OVERRIDE: 1
-      #    HSA_OVERRIDE_GFX_VERSION: 9.0.0
-      #  uses: docker/bake-action@v3
-      #  with:
-      #    push: true
-      #    targets: rocm
-      #    files: docker/rocm/rocm.hcl
-      #    set: |
-      #      rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
-      #      *.cache-from=type=gha
-      #- name: AMD/ROCm gfx1030
-      #  env:
-      #    AMDGPU: gfx1030
-      #    HSA_OVERRIDE: 1
-      #    HSA_OVERRIDE_GFX_VERSION: 10.3.0
-      #  uses: docker/bake-action@v3
-      #  with:
-      #    push: true
-      #    targets: rocm
-      #    files: docker/rocm/rocm.hcl
-      #    set: |
-      #      rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
-      #      *.cache-from=type=gha
-      #- name: AMD/ROCm gfx1100
-      #  env:
-      #    AMDGPU: gfx1100
-      #    HSA_OVERRIDE: 1
-      #    HSA_OVERRIDE_GFX_VERSION: 11.0.0
-      #  uses: docker/bake-action@v3
-      #  with:
-      #    push: true
-      #    targets: rocm
-      #    files: docker/rocm/rocm.hcl
-      #    set: |
-      #      rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
-      #      *.cache-from=type=gha
+      - name: AMD/ROCm general build
+        env:
+          AMDGPU: gfx
+          HSA_OVERRIDE: 0
+        uses: docker/bake-action@v3
+        with:
+          push: true
+          targets: rocm
+          files: docker/rocm/rocm.hcl
+          set: |
+            rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
+            *.cache-from=type=gha
       # The majority of users running arm64 are rpi users, so the rpi
       # build should be the primary arm64 image
   assemble_default_build:
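For local debugging, the bake-action step above is roughly equivalent to this CLI invocation (a sketch — the image name normally comes from the workflow's setup step, so the registry ref below is a placeholder):

```bash
# docker/bake-action with push:true, targets, files, and set maps to:
docker buildx bake --file docker/rocm/rocm.hcl rocm \
  --set rocm.tags=ghcr.io/OWNER/frigate:TAG-rocm \
  --set '*.cache-from=type=gha' \
  --push
```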
.vscode/launch.json (vendored, 5 lines changed)
@@ -3,10 +3,9 @@
   "configurations": [
     {
       "name": "Python: Launch Frigate",
-      "type": "python",
+      "type": "debugpy",
       "request": "launch",
-      "module": "frigate",
-      "justMyCode": true
+      "module": "frigate"
     }
   ]
 }
Makefile (29 lines changed)
@@ -4,8 +4,6 @@ COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
 VERSION = 0.15.0
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
-CURRENT_UID := $(shell id -u)
-CURRENT_GID := $(shell id -g)
 BOARDS= #Initialized empty

 include docker/*/*.mk
@@ -18,25 +16,38 @@ version:
 	echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py

 local: version
-	docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile .
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--tag frigate:latest \
+		--load

 amd64:
-	docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
+		--platform linux/amd64

 arm64:
-	docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
+		--platform linux/arm64

 build: version amd64 arm64
-	docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
+		--platform linux/arm64/v8,linux/amd64

 push: push-boards
-	docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile .
+	docker buildx build --target=frigate --file docker/main/Dockerfile . \
+		--tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) \
+		--platform linux/arm64/v8,linux/amd64 \
+		--push

 run: local
 	docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest

 run_tests: local
-	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m unittest
-	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m mypy --config-file frigate/mypy.ini frigate
+	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
+		python3 -u -m unittest
+	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
+		python3 -u -m mypy --config-file frigate/mypy.ini frigate

 .PHONY: run_tests
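A typical local workflow with the targets above might look like this (a sketch; the tags and ports come from the Makefile itself):

```bash
make local      # build frigate:latest for the host platform via buildx
make run        # run it, publishing the UI on port 5000 with ./config mounted
make run_tests  # run unit tests and mypy inside the freshly built image
```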
@@ -7,7 +7,8 @@
     "*.db",
     "node_modules",
     "__pycache__",
-    "dist"
+    "dist",
+    "/audio-labelmap.txt"
   ],
   "language": "en",
   "dictionaryDefinitions": [
@@ -1,10 +1,15 @@
|
||||
BOARDS += h8l
|
||||
|
||||
local-h8l: version
|
||||
docker buildx bake --load --file=docker/hailo8l/h8l.hcl --set h8l.tags=frigate:latest-h8l h8l
|
||||
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
|
||||
--set h8l.tags=frigate:latest-h8l \
|
||||
--load
|
||||
|
||||
build-h8l: version
|
||||
docker buildx bake --file=docker/hailo8l/h8l.hcl --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l h8l
|
||||
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
|
||||
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l
|
||||
|
||||
push-h8l: build-h8l
|
||||
docker buildx bake --push --file=docker/hailo8l/h8l.hcl --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l h8l
|
||||
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
|
||||
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \
|
||||
--push
|
||||
@@ -3,7 +3,7 @@ argcomplete==2.0.0
 contextlib2==0.6.0.post1
 distlib==0.3.6
 filelock==3.8.0
-future==0.18.3
+future==0.18.2
 importlib-metadata==5.1.0
 importlib-resources==5.1.2
 netaddr==0.8.0
@@ -170,6 +170,9 @@ RUN /build_pysqlite3.sh
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt

+COPY docker/main/requirements-wheels-post.txt /requirements-wheels-post.txt
+RUN pip3 wheel --no-deps --wheel-dir=/wheels-post -r /requirements-wheels-post.txt
+
 # Collect deps in a single layer
 FROM scratch AS deps-rootfs
@@ -201,7 +204,8 @@ ENV ALLOW_RESET=True
 # Disable tokenizer parallelism warning
 ENV TOKENIZERS_PARALLELISM=true

-ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
+ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
+ENV LIBAVFORMAT_VERSION_MAJOR=60

 # Install dependencies
 RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
@@ -211,6 +215,14 @@ RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
     python3 -m pip install --upgrade pip && \
     pip3 install -U /deps/wheels/*.whl

+# We have to uninstall this dependency specifically
+# as it will break onnxruntime-openvino
+RUN pip3 uninstall -y onnxruntime
+
+RUN --mount=type=bind,from=wheels,source=/wheels-post,target=/deps/wheels \
+    python3 -m pip install --upgrade pip && \
+    pip3 install -U /deps/wheels/*.whl
+
 COPY --from=deps-rootfs / /

 RUN ldconfig
@@ -39,32 +39,54 @@ apt-get -qq install --no-install-recommends --no-install-suggests -y \

 # btbn-ffmpeg -> amd64
 if [[ "${TARGETARCH}" == "amd64" ]]; then
-    mkdir -p /usr/lib/btbn-ffmpeg
+    mkdir -p /usr/lib/ffmpeg/5.0
+    mkdir -p /usr/lib/ffmpeg/7.0
     wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay
+    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
+    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
+    wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
+    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
+    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
 fi

 # ffmpeg -> arm64
 if [[ "${TARGETARCH}" == "arm64" ]]; then
-    mkdir -p /usr/lib/btbn-ffmpeg
+    mkdir -p /usr/lib/ffmpeg/5.0
+    mkdir -p /usr/lib/ffmpeg/7.0
     wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay
+    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
+    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
+    wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
+    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
+    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
 fi

 # arch specific packages
 if [[ "${TARGETARCH}" == "amd64" ]]; then
-    # use debian bookworm for hwaccel packages
+    # use debian bookworm for amd / intel-i965 driver packages
     echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        intel-opencl-icd \
-        mesa-va-drivers radeontop libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 intel-gpu-tools
+        i965-va-driver intel-gpu-tools onevpl-tools \
+        libva-drm2 \
+        mesa-va-drivers radeontop

     # something about this dependency requires it to be installed in a separate call rather than in the line above
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
         i965-va-driver-shaders

     rm -f /etc/apt/sources.list.d/debian-bookworm.list

+    # use intel apt intel packages
+    wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
+    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
+    apt-get -qq update
+    apt-get -qq install --no-install-recommends --no-install-suggests -y \
+        intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
+        libmfx1 libmfxgen1 libvpl2
+
+    rm -f /usr/share/keyrings/intel-graphics.gpg
+    rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
 fi

 if [[ "${TARGETARCH}" == "arm64" ]]; then
@@ -72,6 +94,10 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
         libva-drm2 mesa-va-drivers
 fi

+# install vulkan
+apt-get -qq install --no-install-recommends --no-install-suggests -y \
+    libvulkan1 mesa-vulkan-drivers
+
 apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
docker/main/requirements-wheels-post.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
+# ONNX
+onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
+onnxruntime == 1.19.* ; platform_machine == 'aarch64'
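The `platform_machine` markers above let a single requirements file serve both architectures; pip evaluates them against the interpreter's platform, which you can inspect directly (a quick check, not part of the build):

```bash
# PEP 508's platform_machine marker mirrors Python's platform.machine():
# "x86_64" selects onnxruntime-openvino, "aarch64" selects plain onnxruntime.
python3 -c "import platform; print(platform.machine())"
```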
@@ -28,15 +28,15 @@ norfair == 2.2.*
 setproctitle == 1.3.*
 ws4py == 0.5.*
 unidecode == 1.3.*
-onnxruntime == 1.18.*
-openvino == 2024.1.*
+# OpenVino (ONNX installed in wheels-post)
+openvino == 2024.3.*
 # Embeddings
-onnx_clip == 4.0.*
 chromadb == 0.5.0
+onnx_clip == 4.0.*
 # Generative AI
 google-generativeai == 0.6.*
 ollama == 0.2.*
 openai == 1.30.*
 # push notifications
 py-vapid == 1.9.*
-pywebpush == 2.0.*
+pywebpush == 2.0.*
@@ -44,8 +44,6 @@ function migrate_db_path() {

 echo "[INFO] Preparing Frigate..."
 migrate_db_path
-export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')

 echo "[INFO] Starting Frigate..."

 cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
@@ -43,8 +43,6 @@ function get_ip_and_port_from_supervisor() {
     export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
 }

-export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
-
 if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Removing stale config from last run..."
     rm /dev/shm/go2rtc.yaml
@@ -2,16 +2,19 @@

 import json
 import os
+import shutil
 import sys
 from pathlib import Path

 import yaml

 sys.path.insert(0, "/opt/frigate")
-from frigate.const import BIRDSEYE_PIPE  # noqa: E402
-from frigate.ffmpeg_presets import (  # noqa: E402
-    parse_preset_hardware_acceleration_encode,
+from frigate.const import (
+    BIRDSEYE_PIPE,
+    DEFAULT_FFMPEG_VERSION,
+    INCLUDED_FFMPEG_VERSIONS,
 )
+from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode

 sys.path.remove("/opt/frigate")

@@ -105,16 +108,32 @@ else:
         **FRIGATE_ENV_VARS
     )

+# ensure ffmpeg path is set correctly
+path = config.get("ffmpeg", {}).get("path", "default")
+if path == "default":
+    if shutil.which("ffmpeg") is None:
+        ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
+    else:
+        ffmpeg_path = "ffmpeg"
+elif path in INCLUDED_FFMPEG_VERSIONS:
+    ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
+else:
+    ffmpeg_path = f"{path}/bin/ffmpeg"
+
+if go2rtc_config.get("ffmpeg") is None:
+    go2rtc_config["ffmpeg"] = {"bin": ffmpeg_path}
+elif go2rtc_config["ffmpeg"].get("bin") is None:
+    go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path
+
 # need to replace ffmpeg command when using ffmpeg4
-if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
-    if go2rtc_config.get("ffmpeg") is None:
-        go2rtc_config["ffmpeg"] = {
-            "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
-        }
-    elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59:
+    if go2rtc_config["ffmpeg"].get("rtsp") is None:
         go2rtc_config["ffmpeg"]["rtsp"] = (
             "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
         )
-else:
-    if go2rtc_config.get("ffmpeg") is None:
-        go2rtc_config["ffmpeg"] = {"path": ""}

 for name in go2rtc_config.get("streams", {}):
     stream = go2rtc_config["streams"][name]
@@ -145,7 +164,7 @@ if config.get("birdseye", {}).get("restream", False):
     birdseye: dict[str, any] = config.get("birdseye")

     input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
-    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
+    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"

     if go2rtc_config.get("streams"):
         go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
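Taken together, the resolution logic above maps the config's `ffmpeg.path` to a concrete binary roughly as follows (a summary sketch; the versioned paths assume the image layout installed by `install_deps.sh`):

```bash
# path: "default", ffmpeg not on $PATH -> /usr/lib/ffmpeg/<DEFAULT_FFMPEG_VERSION>/bin/ffmpeg
# path: "default", ffmpeg on $PATH     -> ffmpeg
# path: "5.0" (an included version)    -> /usr/lib/ffmpeg/5.0/bin/ffmpeg
# path: "/config/custom-ffmpeg"        -> /config/custom-ffmpeg/bin/ffmpeg
```

Note also the hedged `os.environ.get(..., "59") or "59"` form: it tolerates the variable being unset *or* set to an empty string, either of which would previously have raised on `int()`.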
@@ -22,5 +22,6 @@ ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/btbn-ffmpeg/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/btbn-ffmpeg/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
+ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
@@ -1,10 +1,15 @@
|
||||
BOARDS += rk
|
||||
|
||||
local-rk: version
|
||||
docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk
|
||||
docker buildx bake --file=docker/rockchip/rk.hcl rk \
|
||||
--set rk.tags=frigate:latest-rk \
|
||||
--load
|
||||
|
||||
build-rk: version
|
||||
docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
|
||||
docker buildx bake --file=docker/rockchip/rk.hcl rk \
|
||||
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk
|
||||
|
||||
push-rk: build-rk
|
||||
docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
|
||||
docker buildx bake --file=docker/rockchip/rk.hcl rk \
|
||||
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk \
|
||||
--push
|
||||
@@ -23,11 +23,11 @@ COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/

 RUN apt-get update

-RUN apt-get -y install --no-install-recommends migraphx
+RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
 RUN apt-get -y install --no-install-recommends migraphx-dev

 RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
-RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
+RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
 RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm

 RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
@@ -69,7 +69,11 @@ RUN apt-get -y install libnuma1

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
-COPY docker/rocm/rootfs/ /
+
+COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
+RUN python3 -m pip install --upgrade pip \
+    && pip3 uninstall -y onnxruntime-openvino \
+    && pip3 install -r /requirements.txt

 #######################################################################
 FROM scratch AS rocm-dist
@@ -101,6 +105,3 @@ ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION

 #######################################################################
 FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps
-
-# Request yolov8 download at startup
-ENV DOWNLOAD_YOLOV8=1
docker/rocm/requirements-wheels-rocm.txt (new file, 1 line)
@@ -0,0 +1 @@
+onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl
@@ -4,14 +4,50 @@ BOARDS += rocm
 ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0

 local-rocm: version
-	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) rocm;)
-	unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm rocm
+	$(foreach chipset,$(ROCM_CHIPSETS), \
+		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE=1 \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
+			--load \
+	&&) true
+
+	unset HSA_OVERRIDE_GFX_VERSION && \
+		HSA_OVERRIDE=0 \
+		AMDGPU=gfx \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=frigate:latest-rocm \
+			--load

 build-rocm: version
-	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
-	unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
+	$(foreach chipset,$(ROCM_CHIPSETS), \
+		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE=1 \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
+	&&) true
+
+	unset HSA_OVERRIDE_GFX_VERSION && \
+		HSA_OVERRIDE=0 \
+		AMDGPU=gfx \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm

 push-rocm: build-rocm
-	$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
-	unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
+	$(foreach chipset,$(ROCM_CHIPSETS), \
+		AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
+		HSA_OVERRIDE=1 \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
+			--push \
+	&&) true
+
+	unset HSA_OVERRIDE_GFX_VERSION && \
+		HSA_OVERRIDE=0 \
+		AMDGPU=gfx \
+		docker buildx bake --file=docker/rocm/rocm.hcl rocm \
+			--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
+			--push
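The `$(foreach ...) &&) true` construction above emits one shell command per chipset, chained with `&&` so the recipe fails fast; schematically, the expanded recipe looks like this (illustrative, with the long bake arguments elided):

```bash
AMDGPU=gfx900  HSA_OVERRIDE_GFX_VERSION=9.0.0  HSA_OVERRIDE=1 docker buildx bake ... && \
AMDGPU=gfx1030 HSA_OVERRIDE_GFX_VERSION=10.3.0 HSA_OVERRIDE=1 docker buildx bake ... && \
AMDGPU=gfx1100 HSA_OVERRIDE_GFX_VERSION=11.0.0 HSA_OVERRIDE=1 docker buildx bake ... && true
# the trailing "true" gives the final "&&" emitted by the foreach a command to run
```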
@@ -1,20 +0,0 @@
|
||||
#!/command/with-contenv bash
|
||||
# shellcheck shell=bash
|
||||
# Compile YoloV8 ONNX files into ROCm MIGraphX files
|
||||
|
||||
OVERRIDE=$(cd /opt/frigate && python3 -c 'import frigate.detectors.plugins.rocm as rocm; print(rocm.auto_override_gfx_version())')
|
||||
|
||||
if ! test -z "$OVERRIDE"; then
|
||||
echo "Using HSA_OVERRIDE_GFX_VERSION=${OVERRIDE}"
|
||||
export HSA_OVERRIDE_GFX_VERSION=$OVERRIDE
|
||||
fi
|
||||
|
||||
for onnx in /config/model_cache/yolov8/*.onnx
|
||||
do
|
||||
mxr="${onnx%.onnx}.mxr"
|
||||
if ! test -f $mxr; then
|
||||
echo "processing $onnx into $mxr"
|
||||
/opt/rocm/bin/migraphx-driver compile $onnx --optimize --gpu --enable-offload-copy --binary -o $mxr
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -1 +0,0 @@
-oneshot
@@ -1 +0,0 @@
-/etc/s6-overlay/s6-rc.d/compile-rocm-models/run
@@ -12,5 +12,7 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/
 RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh

+ENV LIBAVFORMAT_VERSION_MAJOR=58
+
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -1,10 +1,15 @@
|
||||
BOARDS += rpi
|
||||
|
||||
local-rpi: version
|
||||
docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi
|
||||
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
|
||||
--set rpi.tags=frigate:latest-rpi \
|
||||
--load
|
||||
|
||||
build-rpi: version
|
||||
docker buildx bake --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
|
||||
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
|
||||
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi
|
||||
|
||||
push-rpi: build-rpi
|
||||
docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
|
||||
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
|
||||
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi \
|
||||
--push
|
||||
|
||||
@@ -12,12 +12,28 @@ ARG TARGETARCH

 COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt

+# Build CuDNN
+FROM wget AS cudnn-deps
+
+ARG COMPUTE_LEVEL
+
+RUN apt-get update \
+    && apt-get install -y git build-essential
+
+RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
+    && dpkg -i cuda-keyring_1.1-1_all.deb \
+    && apt-get update \
+    && apt-get -y install cuda-toolkit \
+    && rm -rf /var/lib/apt/lists/*
+
 FROM tensorrt-base AS frigate-tensorrt
 ENV TRT_VER=8.5.3
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
     ldconfig
+COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda

 ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /

@@ -26,6 +42,7 @@ FROM devcontainer AS devcontainer-trt

 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
@@ -8,5 +8,7 @@ nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
 nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
 nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
+nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
 onnx==1.14.0; platform_machine == 'x86_64'
-protobuf==3.20.3; platform_machine == 'x86_64'
+onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
+protobuf==3.20.3; platform_machine == 'x86_64'
@@ -7,20 +7,35 @@ JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE)
 JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)

 local-trt: version
-	$(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=frigate:latest-tensorrt \
+		--load

 local-trt-jp4: version
-	$(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt
+	$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=frigate:latest-tensorrt-jp4 \
+		--load

 local-trt-jp5: version
-	$(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt
+	$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=frigate:latest-tensorrt-jp5 \
+		--load

 build-trt:
-	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
-	$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
-	$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt
+	$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4
+	$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5

 push-trt: build-trt
-	$(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
-	$(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
-	$(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
+	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \
+		--push
+	$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \
+		--push
+	$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+		--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \
+		--push
@@ -41,7 +41,7 @@ environment_vars:

 ### `database`

-Event and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
+Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.

 If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.
@@ -162,15 +162,15 @@ listen [::]:5000 ipv6only=off;

 ### Custom ffmpeg build

-Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with an ffmpeg build that works for your specific hardware setup.
+Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built ffmpeg binary can be downloaded to /config and used.

 To do this:

-1. Download your ffmpeg build and uncompress to a folder on the host (let's use `/home/appdata/frigate/custom-ffmpeg` for this example).
-2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings.
+1. Download your ffmpeg build and uncompress to the Frigate config folder.
 3. Restart Frigate and the custom version will be used if the mapping was done correctly.

-NOTE: The folder that is mapped from the host needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then `/home/appdata/frigate/custom-ffmpeg` needs to be mapped to `/usr/lib/btbn-ffmpeg`.
+NOTE: The folder that is set for the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`.

 ### Custom go2rtc version
@@ -187,4 +187,4 @@ ffmpeg:

 ### TP-Link VIGI Cameras

-TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded events. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.
+TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.
@@ -7,7 +7,7 @@ title: Camera Configuration

 Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa.

-A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing events and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used.
+A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing tracked objects and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used.

 Each role can only be assigned to one input per camera. The options for roles are as follows:

@@ -46,6 +46,14 @@ cameras:
     side: ...
 ```

+:::note
+
+If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section.
+
+If you plan to use Frigate for recording only, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding.
+
+:::
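A sketch of that recording-only recommendation (camera name and stream URLs are placeholders, not from this change):

```yaml
cameras:
  back:
    ffmpeg:
      inputs:
        - path: rtsp://user:pass@camera-ip/high_res  # placeholder URL
          roles:
            - record
        - path: rtsp://user:pass@camera-ip/low_res   # placeholder URL
          roles:
            - detect
    detect:
      enabled: False  # recording only; the low-res stream still feeds decoding
```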
 For camera model specific settings check the [camera specific](camera_specific.md) infos.

 ## Setting up camera PTZ controls
@@ -3,7 +3,9 @@ id: genai
 title: Generative AI
 ---

-Generative AI can be used to automatically generate descriptions based on the thumbnails of your events. This helps with [semantic search](/configuration/semantic_search) in Frigate by providing detailed text descriptions as a basis of the search query.
+Generative AI can be used to automatically generate descriptions based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate by providing detailed text descriptions as a basis of the search query.
+
+Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.

 ## Configuration

@@ -100,7 +102,7 @@ genai:

 ## Custom Prompts

-Frigate sends multiple frames from the detection along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
+Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:

 ```
 Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.
@@ -108,7 +110,7 @@ Describe the {label} in the sequence of images with as much detail as possible.

 :::tip

-Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the detection as part of the prompt.
+Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.

 :::

@@ -122,13 +124,25 @@ genai:
   model: llava
   prompt: "Describe the {label} in these images from the {camera} security camera."
   object_prompts:
-    person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
+    person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc)."
     car: "Label the primary vehicle in these images with just the name of the company if it is a delivery vehicle, or the color make and model."
 ```

 Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

+```yaml
+cameras:
+  front_door:
+    genai:
+      prompt: "Describe the {label} in these images from the {camera} security camera at the front door of a house, aimed outward toward the street."
+      object_prompts:
+        person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
+        cat: "Describe the cat in these images (color, size, tail). Indicate whether or not the cat is by the flower pots. If the cat is chasing a mouse, make up a name for the mouse."
+```
+
 ### Experiment with prompts

-Providers also has a public facing chat interface for their models. Download a couple different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
+Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.

 - OpenAI - [ChatGPT](https://chatgpt.com)
 - Gemini - [Google AI Studio](https://aistudio.google.com)
@@ -65,24 +65,33 @@ Or map in all the `/dev/video*` devices.

 ## Intel-based CPUs

+**Recommended hwaccel Preset**
+
+| CPU Generation | Intel Driver | Recommended Preset | Notes                               |
+| -------------- | ------------ | ------------------ | ----------------------------------- |
+| gen1 - gen7    | i965         | preset-vaapi       | qsv is not supported                |
+| gen8 - gen12   | iHD          | preset-vaapi       | preset-intel-qsv-* can also be used |
+| gen13+         | iHD / Xe     | preset-intel-qsv-* |                                     |
+| Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-* |                                     |
+
+:::note
+
+The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
+
+See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.
+
+:::
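A minimal compose sketch of that driver override (service name and image tag are illustrative):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      - LIBVA_DRIVER_NAME=i965  # force the legacy Intel driver (gen1 - gen7)
```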
 ### Via VAAPI

-VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs.
+VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

 ```yaml
 ffmpeg:
   hwaccel_args: preset-vaapi
 ```

-:::note
-
-With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
-
-:::
-
-### Via Quicksync (>=10th Generation only)
-
-If VAAPI does not work for you, you can try QSV if your processor supports it. QSV must be set specifically based on the video encoding of the stream.
+### Via Quicksync

 #### H.264 streams
@@ -72,7 +72,7 @@ Here are some common starter configuration examples. Refer to the [reference con
 - Hardware acceleration for decoding video
 - USB Coral detector
 - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
-- Continue to keep all video if it was during any event for 30 days
+- Continue to keep all video if it qualified as an alert or detection for 30 days
 - Save snapshots for 30 days
 - Motion mask for the camera timestamp
@@ -95,10 +95,12 @@ record:
   retain:
     days: 7
     mode: motion
-  events:
+  alerts:
     retain:
-      default: 30
-      mode: motion
+      days: 30
+  detections:
+    retain:
+      days: 30

 snapshots:
   enabled: True
@@ -128,7 +130,7 @@ cameras:
 - VAAPI hardware acceleration for decoding video
 - USB Coral detector
 - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
-- Continue to keep all video if it was during any event for 30 days
+- Continue to keep all video if it qualified as an alert or detection for 30 days
 - Save snapshots for 30 days
 - Motion mask for the camera timestamp
@@ -149,10 +151,12 @@ record:
   retain:
     days: 7
     mode: motion
-  events:
+  alerts:
     retain:
-      default: 30
-      mode: motion
+      days: 30
+  detections:
+    retain:
+      days: 30

 snapshots:
   enabled: True
@@ -182,7 +186,7 @@ cameras:
 - VAAPI hardware acceleration for decoding video
 - OpenVino detector
 - Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
-- Continue to keep all video if it was during any event for 30 days
+- Continue to keep all video if it qualified as an alert or detection for 30 days
 - Save snapshots for 30 days
 - Motion mask for the camera timestamp
@@ -214,10 +218,12 @@ record:
   retain:
     days: 7
     mode: motion
-  events:
+  alerts:
     retain:
-      default: 30
-      mode: motion
+      days: 30
+  detections:
+    retain:
+      days: 30

 snapshots:
   enabled: True
@@ -13,11 +13,11 @@ Once motion is detected, it tries to group up nearby areas of motion together in

 The default motion settings should work well for the majority of cameras, however there are cases where tuning motion detection can lead to better and more optimal results. Each camera has its own environment with different variables that affect motion, this means that the same motion settings will not fit all of your cameras.

-Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events.
+Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss objects that you want to track.

 ## Create Motion Masks

-First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md).
+First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want alerts or detections**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md).

 ## Prepare For Testing
@@ -29,7 +29,7 @@ Now that things are set up, find a time to tune that represents normal circumsta
|
||||
|
||||
:::note
|
||||
|
||||
Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected.
|
||||
Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected.
|
||||
|
||||
:::
|
||||
|
||||
@@ -94,7 +94,7 @@ motion:
|
||||
|
||||
:::tip
|
||||
|
||||
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these events are not missed.
|
||||
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
|
||||
|
||||
:::
|
||||
|
||||
|
||||

@@ -3,6 +3,29 @@ id: object_detectors
title: Object Detectors
---

# Supported Hardware

Frigate supports multiple different detectors that work on different types of hardware:

**Most Hardware**
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.

**AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.

**Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.

**Nvidia**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.

**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.

# Officially Supported Detectors

Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, `rknn`, and `hailo8l`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
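
For example, a single USB Coral detector could be configured like this (a minimal sketch; the detector name `coral` is arbitrary):

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb
```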

@@ -122,6 +145,22 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor

OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` device with OpenVINO. For detailed system requirements, see [OpenVINO System Requirements](https://docs.openvino.ai/2024/about-openvino/release-notes-openvino/system-requirements.html).

:::tip

When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:

```yaml
detectors:
  ov_0:
    type: openvino
    device: GPU
  ov_1:
    type: openvino
    device: GPU
```

:::

### Supported Models

#### SSDLite MobileNet v2

@@ -278,6 +317,173 @@ model:
  height: 320
```

## AMD/ROCm GPU detector

### Setup

The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a Frigate docker image with the `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.

### Docker settings for GPU access

ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When Docker or Frigate is not run as root, the user also needs to be added to the `video` (and possibly `render` and `ssl/_ssl`) groups.

When running docker directly the following flags should be added for device access:

```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
    ...
```

When using docker compose:

```yaml
services:
  frigate:
    ...
    devices:
      - /dev/dri
      - /dev/kfd
```

For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).

### Docker settings for overriding the GPU chipset

Your GPU might work just fine without any special configuration, but in many cases manual settings are needed. The AMD/ROCm software stack comes with a limited set of GPU drivers, and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.

AMD/ROCm also does not "officially" support integrated GPUs. It still works with most of them just fine, but requires special settings. One has to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.

For the rocm Frigate build there is some automatic detection:

- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0

If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from the command line as:

```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
    ...
```

When using docker compose:

```yaml
services:
  frigate:
    ...
    environment:
      HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```

Figuring out what version you need can be complicated, as you can't tell the chipset name and driver from the AMD brand name.

- First, make sure the rocm environment is running properly by running `/opt/rocm/bin/rocminfo` in the Frigate container -- it should list both the CPU and the GPU with their properties.
- Find the chipset version you have (gfxNNN) in the output of `rocminfo` (see below).
- Use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION").
- Override the `HSA_OVERRIDE_GFX_VERSION` with the relevant value.
- If things are not working, check the Frigate docker logs.

#### Figuring out if AMD/ROCm is working and found your GPU

```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```

#### Figuring out your AMD GPU chipset version

We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:

```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx)'
```

### Supported Models

There is no default model provided; the following formats are supported:

#### YOLO-NAS

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) ([Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb)).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
detectors:
  onnx:
    type: rocm

model:
  model_type: yolonas
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## ONNX

ONNX is an open format for building machine learning models. Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.

:::tip

When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:

```yaml
detectors:
  onnx_0:
    type: onnx
  onnx_1:
    type: onnx
```

:::

### Supported Models

There is no default model provided; the following formats are supported:

#### YOLO-NAS

[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) ([Open In Colab](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb)).

:::warning

The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.

After placing the downloaded onnx model in your config folder, you can use the following configuration:

```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: yolonas
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## Deepstack / CodeProject.AI Server Detector

The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking.

@@ -389,7 +595,7 @@ $ cat /sys/kernel/debug/rknpu/load

## Hailo-8l

This detector is available if you are using the Raspberry Pi 5 with Hailo-8L AI Kit. This has not been tested using the Hailo-8L with other hardware.
This detector is available for use with the Hailo-8 AI Acceleration Module.

### Configuration

@@ -20,15 +20,13 @@ For object filters in your configuration, any single detection below `min_score`

In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example.
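
To make the computation concrete, here is a rough Python sketch of the described logic, not Frigate's actual implementation; the scores, `min_score`, and `threshold` values are assumed, as is zero-padding of the history:

```python
from statistics import median

min_score, threshold = 0.5, 0.85
raw_scores = [0.80, 0.45, 0.95, 0.96]  # assumed per-frame detection scores

history = []
for frame, score in enumerate(raw_scores, start=1):
    # Detections below min_score are ignored and recorded as 0.0
    history.append(score if score >= min_score else 0.0)
    # Pad the history to at least 3 values (zero-padding assumed)
    padded = history + [0.0] * max(0, 3 - len(history))
    computed = median(padded)
    print(f"frame {frame}: computed={computed:.3f} true_positive={computed >= threshold}")
```

Running this prints a computed score that only crosses `threshold` at frame 4, matching the walkthrough above.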

show image of snapshot vs event with differing scores

### Minimum Score

Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid events to be lost or disjointed.
Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid tracked objects to be lost or disjointed.

### Threshold

`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create an event. If `threshold` is too high then true positive events may be missed due to the object never scoring high enough.
`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create a tracked object. If `threshold` is too high then true positive tracked objects may be missed due to the object never scoring high enough.

## Object Shape

@@ -52,7 +50,7 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "

### Zones

[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create events for objects that enter the zone.
[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create tracked objects for objects that enter the zone.
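
A hedged sketch of requiring a zone for alerts, assuming the 0.14+ review config supports `required_zones` (camera and zone names are made up):

```yaml
cameras:
  front_yard:
    review:
      alerts:
        required_zones:
          - driveway
```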

### Object Masks

@@ -3,7 +3,7 @@ id: record
title: Recording
---

Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed.
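
For example, a segment recorded at 14:05:12 UTC on 2024-05-01 by a camera named `front_door` (hypothetical names) would be stored at:

```
/media/frigate/recordings/2024-05-01/14/front_door/05.12.mp4
```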

New recording segments are written from the camera stream to cache; they are only moved to disk if they match the configured recording retention policy.

@@ -13,7 +13,7 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br

### Most conservative: Ensure all video is saved

For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with events will be retained until 30 days have passed.
For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.

```yaml
record:
@@ -21,9 +21,13 @@ record:
  retain:
    days: 3
    mode: all
  events:
  alerts:
    retain:
      default: 30
      days: 30
      mode: motion
  detections:
    retain:
      days: 30
      mode: motion
```

@@ -37,25 +41,28 @@ record:
```yaml
  retain:
    days: 3
    mode: motion
  events:
  alerts:
    retain:
      default: 30
      days: 30
      mode: motion
  detections:
    retain:
      days: 30
      mode: motion
```

### Minimum: Events only
### Minimum: Alerts only

If you only want to retain video that occurs during an event, this config will discard video unless an event is ongoing.
If you only want to retain video while a tracked object is active, this config will discard video unless an alert is ongoing.

```yaml
record:
  enabled: True
  retain:
    days: 0
    mode: all
  events:
  alerts:
    retain:
      default: 30
      days: 30
      mode: motion
```

@@ -65,7 +72,7 @@ As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 h

## Configuring Recording Retention

Frigate supports both continuous and event based recordings with separate retention modes and retention periods.
Frigate supports both continuous and tracked object based recordings with separate retention modes and retention periods.

:::tip

@@ -86,25 +93,28 @@ record:

Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean).

### Event Recording
### Object Recording

If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled.
The number of days to keep recordings can be specified for review items classified as alerts as well as detections.

```yaml
record:
  enabled: True
  events:
  alerts:
    retain:
      default: 10 # <- number of days to keep event recordings
      days: 10 # <- number of days to keep alert recordings
  detections:
    retain:
      days: 10 # <- number of days to keep detections recordings
```

This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs.

**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.

## What do the different retain modes mean?

Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect events).
Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects).

Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording.

@@ -112,11 +122,7 @@ Let's say you have Frigate configured so that your doorbell camera would retain
- With the `motion` option the only parts of those 48 hours that would be kept are segments in which Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary.

The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later.

- With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage.
- With the `motion` option all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, person walked by, lighting changed, etc. would be saved.
- With the `active_objects` option it would only keep segments where the object was active. In this case the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away. Essentially reducing the 4 hours to a minute or two of event footage.
The same options are available with alerts and detections, except recordings will only be saved when they overlap with a review item of that type.

A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows:

@@ -126,33 +132,18 @@ record:
```yaml
  retain:
    days: 7
    mode: motion
  events:
  alerts:
    retain:
      default: 14
      days: 14
      mode: active_objects
  detections:
    retain:
      days: 14
      mode: active_objects
```

The above configuration example can be added globally or on a per camera basis.

### Object Specific Retention

You can also set a specific retention length for an object type. The below configuration example builds on the one above, but also specifies that recordings of dogs only need to be kept for 2 days and recordings of cars should be kept for 7 days.

```yaml
record:
  enabled: True
  retain:
    days: 7
    mode: motion
  events:
    retain:
      default: 14
      mode: active_objects
      objects:
        dog: 2
        car: 7
```

## Can I have "continuous" recordings, but only at certain times?

Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
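
For instance, recording can be toggled over MQTT via the `frigate/<camera_name>/recordings/set` topic; a hedged sketch using `mosquitto_pub` (broker host and camera name are assumed):

```bash
# Disable recording for the front_door camera, then re-enable it
mosquitto_pub -h mqtt.local -t frigate/front_door/recordings/set -m "OFF"
mosquitto_pub -h mqtt.local -t frigate/front_door/recordings/set -m "ON"
```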

@@ -210,6 +210,10 @@ birdseye:
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
  # Optional: ffmpeg binary path (default: shown below)
  # can also be set to `7.0` or `5.0` to specify one of the included versions
  # or can be set to any path that holds `bin/ffmpeg` & `bin/ffprobe`
  path: "default"
  # Optional: global ffmpeg args (default: shown below)
  global_args: -hide_banner -loglevel warning -threads 2
  # Optional: global hwaccel args (default: auto detect)
@@ -271,13 +275,13 @@ detect:
  # especially when using separate streams for detect and record.
  # Use this setting to make the timeline bounding boxes more closely align
  # with the recording. The value can be positive or negative.
  # TIP: Imagine there is an event clip with a person walking from left to right.
  # If the event timeline bounding box is consistently to the left of the person
  # TIP: Imagine there is a tracked object clip with a person walking from left to right.
  # If the tracked object lifecycle bounding box is consistently to the left of the person
  # then the value should be decreased. Similarly, if a person is walking from
  # left to right and the bounding box is consistently ahead of the person
  # then the value should be increased.
  # TIP: This offset is dynamic so you can change the value and it will update existing
  # events, this makes it easy to tune.
  # tracked objects, this makes it easy to tune.
  # WARNING: Fast moving objects will likely not have the bounding box align.
  annotation_offset: 0

@@ -394,9 +398,9 @@ record:
  sync_recordings: False
  # Optional: Retention settings for recording
  retain:
    # Optional: Number of days to retain recordings regardless of events (default: shown below)
    # NOTE: This should be set to 0 and retention should be defined in events section below
    # if you only want to retain recordings of events.
    # Optional: Number of days to retain recordings regardless of tracked objects (default: shown below)
    # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
    # if you only want to retain recordings of alerts and detections.
    days: 0
    # Optional: Mode for retention. Available options are: all, motion, and active_objects
    # all - save all recording segments regardless of activity

@@ -419,34 +423,48 @@ record:
    # Optional: Quality of recording preview (default: shown below).
    # Options are: very_low, low, medium, high, very_high
    quality: medium
  # Optional: Event recording settings
  events:
    # Optional: Number of seconds before the event to include (default: shown below)
  # Optional: alert recording settings
  alerts:
    # Optional: Number of seconds before the alert to include (default: shown below)
    pre_capture: 5
    # Optional: Number of seconds after the event to include (default: shown below)
    # Optional: Number of seconds after the alert to include (default: shown below)
    post_capture: 5
    # Optional: Objects to save recordings for. (default: all tracked objects)
    objects:
      - person
    # Optional: Retention settings for recordings of events
    # Optional: Retention settings for recordings of alerts
    retain:
      # Required: Default retention days (default: shown below)
      default: 10
      # Required: Retention days (default: shown below)
      days: 14
      # Optional: Mode for retention. (default: shown below)
      # all - save all recording segments for events regardless of activity
      # motion - save all recording segments for events with any detected motion
      # active_objects - save all recording segments for events with active/moving objects
      # all - save all recording segments for alerts regardless of activity
      # motion - save all recording segments for alerts with any detected motion
      # active_objects - save all recording segments for alerts with active/moving objects
      #
      # NOTE: If the retain mode for the camera is more restrictive than the mode configured
      # here, the segments will already be gone by the time this mode is applied.
      # For example, if the camera retain mode is "motion", the segments without motion are
      # never stored, so setting the mode to "all" here won't bring them back.
      mode: motion
  # Optional: detection recording settings
  detections:
    # Optional: Number of seconds before the detection to include (default: shown below)
    pre_capture: 5
    # Optional: Number of seconds after the detection to include (default: shown below)
    post_capture: 5
    # Optional: Retention settings for recordings of detections
    retain:
      # Required: Retention days (default: shown below)
      days: 14
      # Optional: Mode for retention. (default: shown below)
      # all - save all recording segments for detections regardless of activity
      # motion - save all recording segments for detections with any detected motion
      # active_objects - save all recording segments for detections with active/moving objects
      #
      # NOTE: If the retain mode for the camera is more restrictive than the mode configured
      # here, the segments will already be gone by the time this mode is applied.
      # For example, if the camera retain mode is "motion", the segments without motion are
      # never stored, so setting the mode to "all" here won't bring them back.
      mode: motion
      # Optional: Per object retention days
      objects:
        person: 15

# Optional: Configuration for the jpg snapshots written to the clips directory for each event
# Optional: Configuration for the jpg snapshots written to the clips directory for each tracked object
# NOTE: Can be overridden at the camera level
snapshots:
  # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)

@@ -477,16 +495,16 @@ snapshots:
semantic_search:
  # Optional: Enable semantic search (default: shown below)
  enabled: False
  # Optional: Re-index embeddings database from historical events (default: shown below)
  # Optional: Re-index embeddings database from historical tracked objects (default: shown below)
  reindex: False

# Optional: Configuration for AI generated event descriptions
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
# the camera level (enabled: False) to enhance privacy for indoor cameras.
genai:
  # Optional: Enable Google Gemini description generation (default: shown below)
  # Optional: Enable AI description generation (default: shown below)
  enabled: False
  # Required if enabled: Provider must be one of ollama, gemini, or openai
  provider: ollama

@@ -694,6 +712,18 @@ cameras:
    # By default the cameras are sorted alphabetically.
    order: 0

    # Optional: Configuration for AI generated tracked object descriptions
    genai:
      # Optional: Enable AI description generation (default: shown below)
      enabled: False
      # Optional: The default prompt for generating descriptions. Can use replacement
      # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
      prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
      # Optional: Object specific prompts to customize description results
      # Format: {label}: {prompt}
      object_prompts:
        person: "My special person prompt."

# Optional
ui:
  # Optional: Set a timezone to use in the UI (default: use browser local time)

@@ -21,7 +21,7 @@ Birdseye RTSP restream can be accessed at `rtsp://<frigate_host>:8554/birdseye`.

```yaml
birdseye:
  restream: true
  restream: True
```

### Securing Restream With Authentication

@@ -7,13 +7,13 @@ The Review page of the Frigate UI is for quickly reviewing historical footage of

Review items are filterable by date, object type, and camera.

### Review items vs. events
### Review items vs. tracked objects (formerly "events")

In Frigate 0.13 and earlier versions, the UI presented "events". An event was synonymous with a tracked or detected object. In Frigate 0.14 and later, a review item is a time period where any number of tracked objects were active.

For example, consider a situation where two people walked past your house. One was walking a dog. At the same time, a car drove by on the street behind them.

In this scenario, Frigate 0.13 and earlier would show 4 events in the UI - one for each person, another for the dog, and yet another for the car. You would have had 4 separate videos to watch even though they would have all overlapped.
In this scenario, Frigate 0.13 and earlier would show 4 "events" in the UI - one for each person, another for the dog, and yet another for the car. You would have had 4 separate videos to watch even though they would have all overlapped.

In 0.14 and later, all of that is bundled into a single review item which starts and ends to capture all of that activity. Reviews for a single camera cannot overlap. Once you have watched that time period on that camera, it is marked as reviewed.

@@ -3,11 +3,15 @@ id: semantic_search
title: Using Semantic Search
---

Semantic search works by embedding images and/or text into a vector representation identified by numbers. Frigate has support for two such models which both run locally: [OpenAI CLIP](https://openai.com/research/clip) and [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2). Embeddings are then saved to a local instance of [ChromaDB](https://trychroma.com).
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ (numerical vector representations) for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate has support for two models to create embeddings, both of which run locally: [OpenAI CLIP](https://openai.com/research/clip) and [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2). Embeddings are then saved to a local instance of [ChromaDB](https://trychroma.com).

Semantic Search is accessed via the _Explore_ view in the Frigate UI.

## Configuration

Semantic Search is a global configuration setting.
Semantic search is disabled by default and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.

```yaml
semantic_search:

@@ -17,22 +21,24 @@ semantic_search:

:::tip

The embeddings database can be re-indexed from the existing detections in your database by adding `reindex: True` to your `semantic_search` configuration. Depending on the number of detections you have, it can take up to 30 minutes to complete and may max out your CPU while indexing. Make sure to set the config back to `False` before restarting Frigate again.
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration. Depending on the number of tracked objects you have, it can take a long time to complete and may max out your CPU while indexing. Make sure to set the config back to `False` before restarting Frigate again.

If you are enabling the Search feature for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.

:::

### OpenAI CLIP

This model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on detections to encode the thumbnail image and store it in Chroma. When searching detections via text in the search box, frigate will perform a `text -> image` similarity search against this embedding. When clicking "FIND SIMILAR" next to a detection, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
This model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in Chroma. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

### all-MiniLM-L6-v2

This is a sentence embedding model that has been fine tuned on over 1 billion sentence pairs. This model is used to embed detection descriptions and perform searches against them. Descriptions can be created and/or modified on the search page when clicking on the info icon next to a detection. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate event descriptions.
This is a sentence embedding model that has been fine tuned on over 1 billion sentence pairs. This model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

## Usage Tips
## Usage

1. Semantic search is used in conjunction with the other filters available on the search page. Use a combination of traditional filtering and semantic search for the best results.
1. Semantic search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and semantic search for the best results.
2. The comparison between text and image embedding distances generally means that results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" filter to help find what you are looking for.
3. Make your search language and tone closely match your descriptions. If you are using thumbnail search, phrase your query as an image caption.
4. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
5. Experiment! Find a detection you want to test and start typing keywords to see what works for you.
5. Experiment! Find a tracked object you want to test and start typing keywords to see what works for you.

@@ -64,7 +64,7 @@ cameras:

### Restricting zones to specific objects

Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars.
Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example will limit one zone to person objects and the other to cars.

```yaml
cameras:
@@ -80,7 +80,7 @@ cameras:
          - car
```

Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street.
Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.

### Zone Loitering

@@ -16,10 +16,6 @@ A box returned from the object detection model that outlines an object in the fr
- A gray thin line indicates that the object is detected as being stationary
- A thick line indicates that the object is the subject of autotracking (when enabled).

## Event

The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still. Events are saved when it is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved.

## False Positive

An incorrect detection of an object type. For example a dog being detected as a person, a chair being detected as a dog, etc. A person being detected in an area you want to ignore is not a false positive.

@@ -64,6 +60,10 @@ The threshold is the median score that an object must reach in order to be consi

The top score for an object is the highest median score for an object.

## Tracked Object ("event" in previous versions)

The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still. A tracked object is saved when it is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved.

## Zone

Zones are areas of interest; they can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones)

@@ -87,6 +87,10 @@ Inference speeds will vary greatly depending on the GPU and the model used.
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000    | ~ 12 ms    |

#### AMD GPUs

With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs.

### Community Supported

#### Nvidia Jetson

@@ -73,23 +73,23 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your

Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.

The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).

The Frigate container also stores logs in shm, which can take up to **30MB**, so make sure to take this into account in your math as well.
The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well.

You can calculate the necessary shm size for each camera with the following formula using the resolution specified for detect:
You can calculate the **minimum** shm size for each camera with the following formula using the resolution specified for detect:

```console
# Replace <width> and <height>
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 9 + 270480) / 1048576))'
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 10 + 270480) / 1048576))'

# Example for 1280x720
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 9 + 270480) / 1048576))'
12.12MB
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 10 + 270480) / 1048576))'
13.44MB

# Example for eight cameras detecting at 1280x720, including logs
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576) * 8 + 30))'
126.99MB
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 10 + 270480) / 1048576) * 8 + 40))'
147.53MB
```
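
When using docker compose, a sketch applying a value computed this way (the `256mb` figure is an assumed round-up for eight 720p cameras plus logs):

```yaml
services:
  frigate:
    shm_size: "256mb"
```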

The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.

@@ -238,7 +238,7 @@ Now that you know where you need to mask, use the "Mask & Zone creator" in the o

:::warning

Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get events and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection.
Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get tracked objects, alerts, and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection.

:::

@@ -294,7 +294,15 @@ cameras:

If you don't have separate streams for detect and record, you would just add the record role to the list on the first input.

By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/reference.md).
:::note

If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section.

If you only plan to use Frigate for recording, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding, as sketched below.

:::
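
A hedged sketch of that recommendation (camera name and stream URLs are assumed):

```yaml
cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://192.168.1.10:554/sub # low resolution stream
          roles:
            - detect
        - path: rtsp://192.168.1.10:554/main # high resolution stream
          roles:
            - record
```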

By default, Frigate will retain video of all tracked objects for 10 days. The full set of options for recording can be found [here](../configuration/reference.md).

### Step 7: Complete config

@@ -309,4 +317,3 @@ Now that you have a working install, you can use the following documentation for
3. [Review](../configuration/review.md)
4. [Masks](../configuration/masks.md)
5. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant

@@ -7,11 +7,11 @@ The best way to get started with notifications for Frigate is to use the [Bluepr

It is generally recommended to trigger notifications based on the `frigate/reviews` mqtt topic. This provides the event_id(s) needed to fetch [thumbnails/snapshots/clips](../integrations/home-assistant.md#notification-api) and other useful information to customize when and where you want to receive alerts. The data is published in the form of a change feed, which means you can reference the "previous state" of the object in the `before` section and the "current state" of the object in the `after` section. You can see an example [here](../integrations/mqtt.md#frigateevents).

Here is a simple example of a notification automation of events which will update the existing notification for each change. This means the image you see in the notification will update as Frigate finds a "better" image.
Here is a simple example of a notification automation for tracked objects which will update the existing notification for each change. This means the image you see in the notification will update as Frigate finds a "better" image.

```yaml
automation:
  - alias: Notify of events
  - alias: Notify of tracked object
    trigger:
      platform: mqtt
      topic: frigate/events
```

@@ -189,15 +189,15 @@ Example parameters:

### `GET /api/<camera_name>/<label>/thumbnail.jpg`

Returns the thumbnail from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.
Returns the thumbnail from the latest tracked object for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.
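
For example (hostname, port, camera name, and label are assumed):

```bash
curl -o person.jpg 'http://frigate.local:5000/api/front_door/person/thumbnail.jpg'
```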

### `GET /api/<camera_name>/<label>/clip.mp4`

Returns the clip from the latest event for the given camera and label combo. Using `any` as the label will return the latest clip regardless of type.
Returns the clip from the latest tracked object for the given camera and label combo. Using `any` as the label will return the latest clip regardless of type.

### `GET /api/<camera_name>/<label>/snapshot.jpg`

Returns the snapshot image from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.
Returns the snapshot image from the latest tracked object for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.

### `GET /api/<camera_name>/grid.jpg`

@@ -385,9 +385,9 @@ Specific preview frame from preview cache.

Looping image made from preview video / frames during this time range.

| param    | Type | Description                      |
| -------- | ---- | -------------------------------- |
| `format` | str  | Format of preview [`gif`, `mp4`] |

## Recordings
@@ -148,19 +148,19 @@ Home Assistant > Configuration > Integrations > Frigate > Options
|
||||
|
||||
## Entities Provided
|
||||
|
||||
| Platform | Description |
|
||||
| --------------- | --------------------------------------------------------------------------------- |
|
||||
| `camera` | Live camera stream (requires RTSP). |
|
||||
| `image` | Image of the latest detected object for each camera. |
|
||||
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
|
||||
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
|
||||
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
|
||||
| Platform | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------- |
|
||||
| `camera` | Live camera stream (requires RTSP). |
|
||||
| `image` | Image of the latest detected object for each camera. |
|
||||
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
|
||||
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
|
||||
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
|
||||
|
||||
## Media Browser Support

The integration provides:

- Browsing event recordings with thumbnails
- Browsing tracked object recordings with thumbnails
- Browsing snapshots
- Browsing recordings by month, day, camera, time

@@ -183,19 +183,19 @@ For clips to be castable to media devices, audio is required and may need to be

Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications.

To load a thumbnail for an event:
To load a thumbnail for a tracked object:

```
https://HA_URL/api/frigate/notifications/<event-id>/thumbnail.jpg
```

To load a snapshot for an event:
To load a snapshot for a tracked object:

```
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
```

To load a video clip of an event:
To load a video clip of a tracked object:

```
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
```
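Since the three proxied endpoints differ only in the trailing path, a notification payload can be assembled from just the tracked object id. A small helper sketch (the helper name, HA URL, and id below are hypothetical placeholders):

```python
# Hypothetical helper: build notification media URLs from a frigate/events payload.
def notification_urls(ha_url: str, event_id: str) -> dict:
    base = f"{ha_url}/api/frigate/notifications/{event_id}"
    return {
        "thumbnail": f"{base}/thumbnail.jpg",
        "snapshot": f"{base}/snapshot.jpg",
        "clip": f"{base}/clip.mp4",
    }

print(notification_urls("https://ha.example.com", "1718987128.947436-g92ztx"))
```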
@@ -19,7 +19,7 @@ Causes Frigate to exit. Docker should be configured to automatically restart the

### `frigate/events`

Message published for each changed event. The first message is published when the tracked object is no longer marked as a false_positive. When Frigate finds a better snapshot of the tracked object or when a zone change occurs, it will publish a message with the same id. When the event ends, a final message is published with `end_time` set.
Message published for each changed tracked object. The first message is published when the tracked object is no longer marked as a false_positive. When Frigate finds a better snapshot of the tracked object or when a zone change occurs, it will publish a message with the same id. When the tracked object ends, a final message is published with `end_time` set.

```json
{
@@ -45,6 +45,7 @@ Message published for each changed event. The first message is published when th
    "thumbnail": null,
    "has_snapshot": false,
    "has_clip": false,
    "active": true, // convenience attribute, this is strictly opposite of "stationary"
    "stationary": false, // whether or not the object is considered stationary
    "motionless_count": 0, // number of frames the object has been motionless
    "position_changes": 2, // number of times the object has moved from a stationary position
@@ -74,6 +75,7 @@ Message published for each changed event. The first message is published when th
    "thumbnail": null,
    "has_snapshot": false,
    "has_clip": false,
    "active": true, // convenience attribute, this is strictly opposite of "stationary"
    "stationary": false, // whether or not the object is considered stationary
    "motionless_count": 0, // number of frames the object has been motionless
    "position_changes": 2, // number of times the object has changed position
```
@@ -98,24 +100,22 @@ Message published for each changed review item. The first message is published w

```json
{
  "type": "update", // new, update, end
  "before": {
    "id": "1718987129.308396-fqk5ka", // review_id
    "camera": "front_cam",
    "start_time": 1718987129.308396,
    "end_time": null,
    "severity": "detection",
    "thumb_path": "/media/frigate/clips/review/thumb-front_cam-1718987129.308396-fqk5ka.webp",
    "data": {
      "detections": [ // list of event IDs
        "1718987128.947436-g92ztx",
        "1718987148.879516-d7oq7r",
        "1718987126.934663-q5ywpt"
      ],
      "objects": ["person", "car"],
      "sub_labels": [],
      "zones": [],
      "audio": []
@@ -134,14 +134,9 @@ Message published for each changed review item. The first message is published w
        "1718987148.879516-d7oq7r",
        "1718987126.934663-q5ywpt"
      ],
      "objects": ["person", "car"],
      "sub_labels": ["Bob"],
      "zones": ["front_yard"],
      "audio": []
    }
  }
```
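A minimal subscriber sketch for these topics using `paho-mqtt`, assuming the default `frigate` topic prefix and a broker at `mqtt.local` (both assumptions):

```python
import json

import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    payload = json.loads(msg.payload)
    after = payload["after"]
    # payload["type"] is "new", "update", or "end"
    print(payload["type"], after["id"], after["label"], after.get("end_time"))

client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt.local", 1883)  # hypothetical broker address
client.subscribe("frigate/events")
client.loop_forever()
```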
@@ -152,6 +147,14 @@ Message published for each changed review item. The first message is published w

Same data available at `/api/stats` published at a configurable interval.

### `frigate/notifications/set`

Topic to turn notifications on and off. Expected values are `ON` and `OFF`.

### `frigate/notifications/state`

Topic with current state of notifications. Published values are `ON` and `OFF`.
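For example, notifications can be toggled from any MQTT client. A one-line sketch with `paho-mqtt`, again assuming the default topic prefix and a broker at `mqtt.local`:

```python
import paho.mqtt.publish as publish

# Turn notifications off; Frigate echoes the new state on frigate/notifications/state.
publish.single("frigate/notifications/set", "OFF", hostname="mqtt.local")
```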
## Frigate Camera Topics

### `frigate/<camera_name>/<object_name>`

@@ -159,11 +162,23 @@ Same data available at `/api/stats` published at a configurable interval.

Publishes the count of objects for the camera for use as a sensor in Home Assistant. `all` can be used as the object_name for the count of all objects for the camera.

### `frigate/<camera_name>/<object_name>/active`

Publishes the count of active objects for the camera for use as a sensor in Home Assistant. `all` can be used as the object_name for the count of all active objects for the camera.

### `frigate/<zone_name>/<object_name>`

Publishes the count of objects for the zone for use as a sensor in Home Assistant. `all` can be used as the object_name for the count of all objects for the zone.

### `frigate/<zone_name>/<object_name>/active`

Publishes the count of active objects for the zone for use as a sensor in Home Assistant. `all` can be used as the object_name for the count of all objects for the zone.

### `frigate/<camera_name>/<object_name>/snapshot`

Publishes a jpeg encoded frame of the detected object type. When the object is no longer detected, the highest confidence image is published or the original image
@@ -19,7 +19,7 @@ Once logged in, you can generate an API key for Frigate in Settings.

### Set your API key

In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `SEND TO FRIGATE+` buttons on the events page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).

:::warning

@@ -29,7 +29,7 @@ You cannot use the `environment_vars` section of your configuration file to set

## Submit examples

Once your API key is configured, you can submit examples directly from the events page in Frigate using the `SEND TO FRIGATE+` button.
Once your API key is configured, you can submit examples directly from the Explore page in Frigate using the `Frigate+` button.

:::note

@@ -33,7 +33,7 @@ Frigate+ models support a more relevant set of objects for security cameras. Cur

### Label attributes

Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate events. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.

In order to have Frigate start using these attribute labels, you will need to add them to the list of objects to track:

@@ -17,7 +17,7 @@ ffmpeg:
  record: preset-record-generic-audio-aac
```

### I can't view events or recordings in the Web UI.
### I can't view recordings in the Web UI.

Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
docs/package-lock.json (generated): diff suppressed because it is too large (928 lines changed).
@@ -14,15 +14,15 @@
    "write-heading-ids": "docusaurus write-heading-ids"
  },
  "dependencies": {
    "@docusaurus/core": "^3.4.0",
    "@docusaurus/preset-classic": "^3.4.0",
    "@docusaurus/theme-mermaid": "^3.4.0",
    "@docusaurus/core": "^3.5.2",
    "@docusaurus/preset-classic": "^3.5.2",
    "@docusaurus/theme-mermaid": "^3.5.2",
    "@mdx-js/react": "^3.0.0",
    "clsx": "^2.0.0",
    "prism-react-renderer": "^2.1.0",
    "prism-react-renderer": "^2.4.0",
    "raw-loader": "^4.0.2",
    "react": "^18.2.0",
    "react-dom": "^18.2.0"
    "react": "^18.3.1",
    "react-dom": "^18.3.1"
  },
  "browserslist": {
    "production": [
@@ -39,7 +39,7 @@
  "devDependencies": {
    "@docusaurus/module-type-aliases": "^3.4.0",
    "@docusaurus/types": "^3.4.0",
    "@types/react": "^18.2.79"
    "@types/react": "^18.3.7"
  },
  "engines": {
    "node": ">=18.0"
@@ -1,17 +1,28 @@
import faulthandler
import logging
import threading

from flask import cli

from frigate.app import FrigateApp

faulthandler.enable()

threading.current_thread().name = "frigate"
def main() -> None:
    faulthandler.enable()

    # Clear all existing handlers.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[],
        force=True,
    )

    threading.current_thread().name = "frigate"
    cli.show_server_banner = lambda *x: None

    # Run the main application.
    FrigateApp().start()

cli.show_server_banner = lambda *x: None

if __name__ == "__main__":
    frigate_app = FrigateApp()

    frigate_app.start()
    main()
@@ -414,7 +414,7 @@ def ffprobe():
    output = []

    for path in paths:
        ffprobe = ffprobe_stream(path.strip())
        ffprobe = ffprobe_stream(current_app.frigate_config.ffmpeg, path.strip())
        output.append(
            {
                "return_code": ffprobe.returncode,
@@ -251,6 +251,74 @@ def events():
    return jsonify(list(events))


@EventBp.route("/events/explore")
def events_explore():
    limit = request.args.get("limit", 10, type=int)

    subquery = Event.select(
        Event.id,
        Event.camera,
        Event.label,
        Event.zones,
        Event.start_time,
        Event.end_time,
        Event.has_clip,
        Event.has_snapshot,
        Event.plus_id,
        Event.retain_indefinitely,
        Event.sub_label,
        Event.top_score,
        Event.false_positive,
        Event.box,
        Event.data,
        fn.rank()
        .over(partition_by=[Event.label], order_by=[Event.start_time.desc()])
        .alias("rank"),
        fn.COUNT(Event.id).over(partition_by=[Event.label]).alias("event_count"),
    ).alias("subquery")

    query = (
        Event.select(
            subquery.c.id,
            subquery.c.camera,
            subquery.c.label,
            subquery.c.zones,
            subquery.c.start_time,
            subquery.c.end_time,
            subquery.c.has_clip,
            subquery.c.has_snapshot,
            subquery.c.plus_id,
            subquery.c.retain_indefinitely,
            subquery.c.sub_label,
            subquery.c.top_score,
            subquery.c.false_positive,
            subquery.c.box,
            subquery.c.data,
            subquery.c.event_count,
        )
        .from_(subquery)
        .where(subquery.c.rank <= limit)
        .order_by(subquery.c.event_count.desc(), subquery.c.start_time.desc())
        .dicts()
    )

    events = list(query.iterator())

    processed_events = [
        {k: v for k, v in event.items() if k != "data"}
        | {
            "data": {
                k: v
                for k, v in event["data"].items()
                if k in ["type", "score", "top_score", "description"]
            }
        }
        for event in events
    ]

    return jsonify(processed_events)
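The net effect is up to `limit` most recent events per label, with the labels that have the most events sorted first. A rough usage sketch against a hypothetical host:

```python
import requests

# Returns the 5 most recent events for each label (host is an assumption).
resp = requests.get("http://frigate.local:5000/api/events/explore", params={"limit": 5})
for event in resp.json():
    print(event["label"], event["event_count"], event["id"])
```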
@EventBp.route("/event_ids")
|
||||
def event_ids():
|
||||
idString = request.args.get("ids")
|
||||
@@ -274,7 +342,7 @@ def event_ids():
|
||||
@EventBp.route("/events/search")
|
||||
def events_search():
|
||||
query = request.args.get("query", type=str)
|
||||
search_type = request.args.get("search_type", "text", type=str)
|
||||
search_type = request.args.get("search_type", "thumbnail,description", type=str)
|
||||
include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)
|
||||
limit = request.args.get("limit", 50, type=int)
|
||||
|
||||
@@ -285,7 +353,10 @@ def events_search():
|
||||
after = request.args.get("after", type=float)
|
||||
before = request.args.get("before", type=float)
|
||||
|
||||
if not query:
|
||||
# for similarity search
|
||||
event_id = request.args.get("event_id", type=str)
|
||||
|
||||
if not query and not event_id:
|
||||
return make_response(
|
||||
jsonify(
|
||||
{
|
||||
@@ -317,7 +388,10 @@ def events_search():
|
||||
Event.zones,
|
||||
Event.start_time,
|
||||
Event.end_time,
|
||||
Event.has_clip,
|
||||
Event.has_snapshot,
|
||||
Event.data,
|
||||
Event.plus_id,
|
||||
ReviewSegment.thumb_path,
|
||||
]
|
||||
|
||||
@@ -358,10 +432,10 @@ def events_search():
|
||||
thumb_ids = {}
|
||||
desc_ids = {}
|
||||
|
||||
if search_type == "thumbnail":
|
||||
if search_type == "similarity":
|
||||
# Grab the ids of events that match the thumbnail image embeddings
|
||||
try:
|
||||
search_event: Event = Event.get(Event.id == query)
|
||||
search_event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
return make_response(
|
||||
jsonify(
|
||||
@@ -379,31 +453,41 @@ def events_search():
            n_results=limit,
            where=where,
        )
        thumb_ids = dict(zip(thumb_result["ids"][0], thumb_result["distances"][0]))
    else:
        thumb_result = context.embeddings.thumbnail.query(
            query_texts=[query],
            n_results=limit,
            where=where,
        )
        # Do a rudimentary normalization of the difference in distances returned by CLIP and MiniLM.
        thumb_ids = dict(
            zip(
                thumb_result["ids"][0],
                context.thumb_stats.normalize(thumb_result["distances"][0]),
            )
        )
        desc_result = context.embeddings.description.query(
            query_texts=[query],
            n_results=limit,
            where=where,
        )
        desc_ids = dict(
            zip(
                desc_result["ids"][0],
                context.desc_stats.normalize(desc_result["distances"][0]),
    else:
        search_types = search_type.split(",")

        if "thumbnail" in search_types:
            thumb_result = context.embeddings.thumbnail.query(
                query_texts=[query],
                n_results=limit,
                where=where,
            )
            # Do a rudimentary normalization of the difference in distances returned by CLIP and MiniLM.
            thumb_ids = dict(
                zip(
                    thumb_result["ids"][0],
                    context.thumb_stats.normalize(thumb_result["distances"][0]),
                )
            )

        if "description" in search_types:
            desc_result = context.embeddings.description.query(
                query_texts=[query],
                n_results=limit,
                where=where,
            )
            desc_ids = dict(
                zip(
                    desc_result["ids"][0],
                    context.desc_stats.normalize(desc_result["distances"][0]),
                )
            )
            )

    results = {}
    for event_id in thumb_ids.keys() | desc_ids:
@@ -439,9 +523,11 @@ def events_search():
    events = [
        {k: v for k, v in event.items() if k != "data"}
        | {
            k: v
            for k, v in event["data"].items()
            if k in ["type", "score", "top_score", "description"]
            "data": {
                k: v
                for k, v in event["data"].items()
                if k in ["type", "score", "top_score", "description"]
            }
        }
        | {
            "search_distance": results[event["id"]]["distance"],

@@ -149,9 +149,9 @@ def export_delete(id: str):
    try:
        if process.name() != "ffmpeg":
            continue
        flist = process.open_files()
        if flist:
            for nt in flist:
        file_list = process.open_files()
        if file_list:
            for nt in file_list:
                if nt.path.startswith(EXPORT_DIR):
                    files_in_use.append(nt.path.split("/")[-1])
    except psutil.Error:
@@ -17,6 +17,7 @@ from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name
from werkzeug.utils import secure_filename

from frigate.config import FrigateConfig
from frigate.const import (
    CACHE_DIR,
    CLIPS_DIR,
@@ -179,14 +180,20 @@ def latest_frame(camera_name):
    )


@MediaBp.route("/<camera_name>/recordings/<frame_time>/snapshot.png")
def get_snapshot_from_recording(camera_name: str, frame_time: str):
@MediaBp.route("/<camera_name>/recordings/<frame_time>/snapshot.<format>")
def get_snapshot_from_recording(camera_name: str, frame_time: str, format: str):
    if camera_name not in current_app.frigate_config.cameras:
        return make_response(
            jsonify({"success": False, "message": "Camera not found"}),
            404,
        )

    if format not in ["png", "jpg"]:
        return make_response(
            jsonify({"success": False, "message": "Invalid format"}),
            400,
        )

    frame_time = float(frame_time)
    recording_query = (
        Recordings.select(
@@ -207,7 +214,14 @@ def get_snapshot_from_recording(camera_name: str, frame_time: str):
    try:
        recording: Recordings = recording_query.get()
        time_in_segment = frame_time - recording.start_time
        image_data = get_image_from_recording(recording.path, time_in_segment)

        height = request.args.get("height", type=int)
        codec = "png" if format == "png" else "mjpeg"
        config: FrigateConfig = current_app.frigate_config

        image_data = get_image_from_recording(
            config.ffmpeg, recording.path, time_in_segment, codec, height
        )

        if not image_data:
            return make_response(
@@ -221,7 +235,7 @@ def get_snapshot_from_recording(camera_name: str, frame_time: str):
        )

        response = make_response(image_data)
        response.headers["Content-Type"] = "image/png"
        response.headers["Content-Type"] = f"image/{format}"
        return response
    except DoesNotExist:
        return make_response(
@@ -261,9 +275,12 @@ def submit_recording_snapshot_to_plus(camera_name: str, frame_time: str):
    )

    try:
        config: FrigateConfig = current_app.frigate_config
        recording: Recordings = recording_query.get()
        time_in_segment = frame_time - recording.start_time
        image_data = get_image_from_recording(recording.path, time_in_segment)
        image_data = get_image_from_recording(
            config.ffmpeg, recording.path, time_in_segment, "png"
        )

        if not image_data:
            return make_response(
@@ -462,9 +479,11 @@ def recording_clip(camera_name, start_ts, end_ts):
    file_name = secure_filename(file_name)
    path = os.path.join(CLIPS_DIR, f"cache/{file_name}")

    config: FrigateConfig = current_app.frigate_config

    if not os.path.exists(path):
        ffmpeg_cmd = [
            "ffmpeg",
            config.ffmpeg.ffmpeg_path,
            "-hide_banner",
            "-y",
            "-protocol_whitelist",
@@ -585,7 +604,8 @@ def vod_ts(camera_name, start_ts, end_ts):
    )


@MediaBp.route("/vod/<year_month>/<day>/<hour>/<camera_name>")
@MediaBp.route("/vod/<year_month>/<int:day>/<int:hour>/<camera_name>")
@MediaBp.route("/vod/<year_month>/<float:day>/<float:hour>/<camera_name>")
def vod_hour_no_timezone(year_month, day, hour, camera_name):
    return vod_hour(
        year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
@@ -1128,8 +1148,9 @@ def preview_gif(camera_name: str, start_ts, end_ts, max_cache_age=2592000):
    diff = start_ts - preview.start_time
    minutes = int(diff / 60)
    seconds = int(diff % 60)
    config: FrigateConfig = current_app.frigate_config
    ffmpeg_cmd = [
        "ffmpeg",
        config.ffmpeg.ffmpeg_path,
        "-hide_banner",
        "-loglevel",
        "warning",
@@ -1193,9 +1214,10 @@ def preview_gif(camera_name: str, start_ts, end_ts, max_cache_age=2592000):

    last_file = selected_previews[-2]
    selected_previews.append(last_file)
    config: FrigateConfig = current_app.frigate_config

    ffmpeg_cmd = [
        "ffmpeg",
        config.ffmpeg.ffmpeg_path,
        "-hide_banner",
        "-loglevel",
        "warning",
@@ -1288,8 +1310,9 @@ def preview_mp4(camera_name: str, start_ts, end_ts, max_cache_age=604800):
    diff = start_ts - preview.start_time
    minutes = int(diff / 60)
    seconds = int(diff % 60)
    config: FrigateConfig = current_app.frigate_config
    ffmpeg_cmd = [
        "ffmpeg",
        config.ffmpeg.ffmpeg_path,
        "-hide_banner",
        "-loglevel",
        "warning",
@@ -1351,9 +1374,10 @@ def preview_mp4(camera_name: str, start_ts, end_ts, max_cache_age=604800):

    last_file = selected_previews[-2]
    selected_previews.append(last_file)
    config: FrigateConfig = current_app.frigate_config

    ffmpeg_cmd = [
        "ffmpeg",
        config.ffmpeg.ffmpeg_path,
        "-hide_banner",
        "-loglevel",
        "warning",
@@ -75,7 +75,10 @@ def preview_ts(camera_name, start_ts, end_ts):
    return make_response(jsonify(clips), 200)


@PreviewBp.route("/preview/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
@PreviewBp.route("/preview/<year_month>/<int:day>/<int:hour>/<camera_name>/<tz_name>")
@PreviewBp.route(
    "/preview/<year_month>/<float:day>/<float:hour>/<camera_name>/<tz_name>"
)
def preview_hour(year_month, day, hour, camera_name, tz_name):
    parts = year_month.split("-")
    start_date = (

@@ -94,6 +94,18 @@ def review():
    return jsonify([r for r in review])


@ReviewBp.route("/review/event/<id>")
def get_review_from_event(id: str):
    try:
        return model_to_dict(
            ReviewSegment.get(
                ReviewSegment.data["detections"].cast("text") % f'*"{id}"*'
            )
        )
    except DoesNotExist:
        return "Review item not found", 404
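This lets a client jump from a tracked object id straight to its parent review item. A usage sketch (host and id are placeholders):

```python
import requests

event_id = "1718987128.947436-g92ztx"  # an id from a review item's detections list
resp = requests.get(f"http://frigate.local:5000/api/review/event/{event_id}")
print(resp.json()["id"] if resp.ok else "Review item not found")
```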
@ReviewBp.route("/review/<id>")
|
||||
def get_review(id: str):
|
||||
try:
|
||||
|
||||
@@ -43,7 +43,7 @@ from frigate.events.audio import listen_to_audio
|
||||
from frigate.events.cleanup import EventCleanup
|
||||
from frigate.events.external import ExternalEventProcessor
|
||||
from frigate.events.maintainer import EventProcessor
|
||||
from frigate.log import log_process, root_configurer
|
||||
from frigate.log import log_thread
|
||||
from frigate.models import (
|
||||
Event,
|
||||
Export,
|
||||
@@ -113,15 +113,6 @@ class FrigateApp:
|
||||
else:
|
||||
logger.debug(f"Skipping directory: {d}")
|
||||
|
||||
def init_logger(self) -> None:
|
||||
self.log_process = mp.Process(
|
||||
target=log_process, args=(self.log_queue,), name="log_process"
|
||||
)
|
||||
self.log_process.daemon = True
|
||||
self.log_process.start()
|
||||
self.processes["logger"] = self.log_process.pid or 0
|
||||
root_configurer(self.log_queue)
|
||||
|
||||
def init_config(self) -> None:
|
||||
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
|
||||
|
||||
@@ -374,7 +365,7 @@ class FrigateApp:
        except PermissionError:
            logger.error("Unable to write to /config to save export state")

        migrate_exports(self.config.cameras.keys())
        migrate_exports(self.config.ffmpeg, self.config.cameras.keys())

    def init_external_event_processor(self) -> None:
        self.external_event_processor = ExternalEventProcessor(self.config)
@@ -406,7 +397,7 @@ class FrigateApp:
        if self.config.mqtt.enabled:
            comms.append(MqttClient(self.config))

        if self.config.notifications.enabled:
        if self.config.notifications.enabled_in_config:
            comms.append(WebPushClient(self.config))

        comms.append(WebSocketClient(self.config))
@@ -537,7 +528,7 @@ class FrigateApp:
        capture_process = mp.Process(
            target=capture_camera,
            name=f"camera_capture:{name}",
            args=(name, config, self.camera_metrics[name]),
            args=(name, config, self.shm_frame_count, self.camera_metrics[name]),
        )
        capture_process.daemon = True
        self.camera_metrics[name]["capture_process"] = capture_process
@@ -601,19 +592,34 @@ class FrigateApp:
        self.frigate_watchdog.start()

    def check_shm(self) -> None:
        available_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)
        min_req_shm = 30
        total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)

        for _, camera in self.config.cameras.items():
            min_req_shm += round(
                (camera.detect.width * camera.detect.height * 1.5 * 9 + 270480)
                / 1048576,
                1,
            )
        # required for log files + nginx cache
        min_req_shm = 40 + 10

        if available_shm < min_req_shm:
        if self.config.birdseye.restream:
            min_req_shm += 8

        available_shm = total_shm - min_req_shm
        cam_total_frame_size = 0

        for camera in self.config.cameras.values():
            if camera.enabled:
                cam_total_frame_size += round(
                    (camera.detect.width * camera.detect.height * 1.5 + 270480)
                    / 1048576,
                    1,
                )

        self.shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))

        logger.debug(
            f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {self.shm_frame_count} frames for each camera in SHM"
        )

        if self.shm_frame_count < 10:
            logger.warning(
                f"The current SHM size of {available_shm}MB is too small, recommend increasing it to at least {min_req_shm}MB."
                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size)}MB."
            )
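As a worked example of the new sizing logic: with one enabled 1280x720 detect stream, a frame costs round((1280 * 720 * 1.5 + 270480) / 1048576, 1) = 1.6 MB; the reserved overhead is 40 + 10 = 50 MB, plus 8 MB when Birdseye restreaming is on. On a hypothetical 64 MB `/dev/shm` that leaves about 14 MB, so the camera gets min(50, int(14 / 1.6)) = 8 frames and the warning fires. A sketch of the same arithmetic (the shm size and resolution are assumptions):

```python
# Re-derivation of check_shm() for one hypothetical 1280x720 camera.
total_shm = 64.0                      # MB in /dev/shm (assumed)
min_req_shm = 40 + 10                 # log files + nginx cache
frame_mb = round((1280 * 720 * 1.5 + 270480) / 1048576, 1)  # ~1.6 MB per frame
available = total_shm - min_req_shm   # 14.0 MB left for frames
frames = min(50, int(available / frame_mb))
print(frames)  # 8 -> below 10, so Frigate would log the "SHM too small" warning
```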
    def init_auth(self) -> None:

@@ -652,6 +658,7 @@ class FrigateApp:
        logger.info("********************************************************")
        logger.info("********************************************************")

    @log_thread()
    def start(self) -> None:
        parser = argparse.ArgumentParser(
            prog="Frigate",
@@ -660,7 +667,6 @@ class FrigateApp:
        parser.add_argument("--validate-config", action="store_true")
        args = parser.parse_args()

        self.init_logger()
        logger.info(f"Starting Frigate ({VERSION})")

        try:
@@ -687,13 +693,11 @@ class FrigateApp:
            print("*************************************************************")
            print("*** End Config Validation Errors ***")
            print("*************************************************************")
            self.log_process.terminate()
            sys.exit(1)
        if args.validate_config:
            print("*************************************************************")
            print("*** Your config file is valid. ***")
            print("*************************************************************")
            self.log_process.terminate()
            sys.exit(0)
        self.set_environment_vars()
        self.set_log_levels()
@@ -710,7 +714,6 @@ class FrigateApp:
            self.init_dispatcher()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)
        self.start_detectors()
        self.start_video_output_processor()
@@ -718,6 +721,7 @@ class FrigateApp:
        self.init_historical_regions()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.check_shm()
        self.start_camera_capture_processes()
        self.start_audio_processors()
        self.start_storage_maintainer()
@@ -729,7 +733,6 @@ class FrigateApp:
        self.start_event_cleanup()
        self.start_record_cleanup()
        self.start_watchdog()
        self.check_shm()
        self.init_auth()

        # Flask only listens for SIGINT, so we need to catch SIGTERM and send SIGINT
@@ -833,7 +836,4 @@ class FrigateApp:
                shm.close()
                shm.unlink()

        self.log_process.terminate()
        self.log_process.join()

        os._exit(os.EX_OK)
@@ -75,6 +75,9 @@ class Dispatcher:
            "birdseye": self._on_birdseye_command,
            "birdseye_mode": self._on_birdseye_mode_command,
        }
        self._global_settings_handlers: dict[str, Callable] = {
            "notifications": self._on_notification_command,
        }

        for comm in self.comms:
            comm.subscribe(self._receive)
@@ -86,9 +89,13 @@ class Dispatcher:
        if topic.endswith("set"):
            try:
                # example /cam_name/detect/set payload=ON|OFF
                camera_name = topic.split("/")[-3]
                command = topic.split("/")[-2]
                self._camera_settings_handlers[command](camera_name, payload)
                if topic.count("/") == 2:
                    camera_name = topic.split("/")[-3]
                    command = topic.split("/")[-2]
                    self._camera_settings_handlers[command](camera_name, payload)
                elif topic.count("/") == 1:
                    command = topic.split("/")[-2]
                    self._global_settings_handlers[command](payload)
            except IndexError:
                logger.error(f"Received invalid set command: {topic}")
                return
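The slash count distinguishes per-camera from global commands once the topic prefix is stripped: `front_door/detect/set` (two slashes) routes to a camera handler, while `notifications/set` (one slash) routes to a global handler. A toy illustration of the same dispatch rule (topic names are examples):

```python
def route(topic: str) -> str:
    # Mirrors the dispatcher's slash-count check (prefix already stripped).
    if topic.count("/") == 2:
        camera, command, _ = topic.split("/")
        return f"camera handler: {command} on {camera}"
    elif topic.count("/") == 1:
        command = topic.split("/")[-2]
        return f"global handler: {command}"
    return "not a set command"

print(route("front_door/detect/set"))  # camera handler: detect on front_door
print(route("notifications/set"))      # global handler: notifications
```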
@@ -282,6 +289,18 @@ class Dispatcher:
        self.config_updater.publish(f"config/motion/{camera_name}", motion_settings)
        self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)

    def _on_notification_command(self, payload: str) -> None:
        """Callback for notification topic."""
        if payload != "ON" and payload != "OFF":
            # a bare f-string here would be a no-op; log the rejection instead
            logger.error(f"Received unsupported value for notification: {payload}")
            return

        notification_settings = self.config.notifications
        logger.info(f"Setting notifications: {payload}")
        notification_settings.enabled = payload == "ON"  # type: ignore[union-attr]
        self.config_updater.publish("config/notifications", notification_settings)
        self.publish("notifications/state", payload, retain=True)

    def _on_audio_command(self, camera_name: str, payload: str) -> None:
        """Callback for audio topic."""
        audio_settings = self.config.cameras[camera_name].audio
@@ -105,6 +105,13 @@ class MqttClient(Communicator):  # type: ignore[misc]
            retain=True,
        )

        if self.config.notifications.enabled_in_config:
            self.publish(
                "notifications/state",
                "ON" if self.config.notifications.enabled else "OFF",
                retain=True,
            )

        self.publish("available", "online", retain=True)

    def on_mqtt_command(
@@ -209,6 +216,12 @@ class MqttClient(Communicator):  # type: ignore[misc]
            self.on_mqtt_command,
        )

        if self.config.notifications.enabled_in_config:
            self.client.message_callback_add(
                f"{self.mqtt_config.topic_prefix}/notifications/set",
                self.on_mqtt_command,
            )

        self.client.message_callback_add(
            f"{self.mqtt_config.topic_prefix}/restart", self.on_mqtt_command
        )
@@ -9,6 +9,7 @@ from typing import Any, Callable
from py_vapid import Vapid01
from pywebpush import WebPusher

from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.dispatcher import Communicator
from frigate.config import FrigateConfig
from frigate.const import CONFIG_DIR
@@ -41,6 +42,9 @@ class WebPushClient(Communicator):  # type: ignore[misc]
            for sub in user["notification_tokens"]:
                self.web_pushers[user["username"]].append(WebPusher(sub))

        # notification config updater
        self.config_subscriber = ConfigSubscriber("config/notifications")

    def subscribe(self, receiver: Callable) -> None:
        """Wrapper for allowing dispatcher to subscribe."""
        pass
@@ -101,6 +105,15 @@ class WebPushClient(Communicator):  # type: ignore[misc]

    def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
        """Wrapper for publishing when client is in valid state."""
        # check for updated notification config
        _, updated_notification_config = self.config_subscriber.check_for_update()

        if updated_notification_config:
            self.config.notifications = updated_notification_config

        if not self.config.notifications.enabled:
            return

        if topic == "reviews":
            self.send_alert(json.loads(payload))
@@ -3,6 +3,7 @@ from __future__ import annotations
import json
import logging
import os
import shutil
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
@@ -25,7 +26,9 @@ from frigate.const import (
    CACHE_DIR,
    CACHE_SEGMENT_FORMAT,
    DEFAULT_DB_PATH,
    DEFAULT_FFMPEG_VERSION,
    FREQUENCY_STATS_POINTS,
    INCLUDED_FFMPEG_VERSIONS,
    MAX_PRE_CAPTURE,
    REGEX_CAMERA_NAME,
    YAML_EXT,
@@ -172,6 +175,9 @@ class AuthConfig(FrigateBaseModel):
class NotificationConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable notifications")
    email: Optional[str] = Field(default=None, title="Email required for push.")
    enabled_in_config: Optional[bool] = Field(
        default=None, title="Keep track of original state of notifications."
    )


class StatsConfig(FrigateBaseModel):
@@ -296,12 +302,14 @@ class RetainModeEnum(str, Enum):
    active_objects = "active_objects"


class RetainConfig(FrigateBaseModel):
    default: float = Field(default=10, title="Default retention period.")
class RecordRetainConfig(FrigateBaseModel):
    days: float = Field(default=0, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")


class ReviewRetainConfig(FrigateBaseModel):
    days: float = Field(default=10, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
    objects: Dict[str, float] = Field(
        default_factory=dict, title="Object retention period."
    )


class EventsConfig(FrigateBaseModel):
@@ -309,18 +317,9 @@ class EventsConfig(FrigateBaseModel):
        default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE
    )
    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
    objects: Optional[List[str]] = Field(
        None,
        title="List of objects to be detected in order to save the event.",
    retain: ReviewRetainConfig = Field(
        default_factory=ReviewRetainConfig, title="Event retention settings."
    )
    retain: RetainConfig = Field(
        default_factory=RetainConfig, title="Event retention settings."
    )


class RecordRetainConfig(FrigateBaseModel):
    days: float = Field(default=0, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")


class RecordExportConfig(FrigateBaseModel):
@@ -355,8 +354,11 @@ class RecordConfig(FrigateBaseModel):
    retain: RecordRetainConfig = Field(
        default_factory=RecordRetainConfig, title="Record retention settings."
    )
    events: EventsConfig = Field(
        default_factory=EventsConfig, title="Event specific settings."
    detections: EventsConfig = Field(
        default_factory=EventsConfig, title="Detection specific retention settings."
    )
    alerts: EventsConfig = Field(
        default_factory=EventsConfig, title="Alert specific retention settings."
    )
    export: RecordExportConfig = Field(
        default_factory=RecordExportConfig, title="Recording Export Config"
@@ -763,8 +765,14 @@ class GenAIConfig(FrigateBaseModel):
    object_prompts: Dict[str, str] = Field(default={}, title="Object specific prompts.")


class GenAICameraConfig(FrigateBaseModel):
# uses BaseModel because some global attributes are not available at the camera level
class GenAICameraConfig(BaseModel):
    enabled: bool = Field(default=False, title="Enable GenAI for camera.")
    prompt: str = Field(
        default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
        title="Default caption prompt.",
    )
    object_prompts: Dict[str, str] = Field(default={}, title="Object specific prompts.")


class AudioConfig(FrigateBaseModel):
@@ -867,6 +875,7 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):


class FfmpegConfig(FrigateBaseModel):
    path: str = Field(default="default", title="FFmpeg path")
    global_args: Union[str, List[str]] = Field(
        default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
    )
@@ -885,6 +894,30 @@ class FfmpegConfig(FrigateBaseModel):
        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
    )

    @property
    def ffmpeg_path(self) -> str:
        if self.path == "default":
            if shutil.which("ffmpeg") is None:
                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
            else:
                return "ffmpeg"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
        else:
            return f"{self.path}/bin/ffmpeg"

    @property
    def ffprobe_path(self) -> str:
        if self.path == "default":
            if shutil.which("ffprobe") is None:
                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
            else:
                return "ffprobe"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
        else:
            return f"{self.path}/bin/ffprobe"
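The resolution order is: `default` prefers a system `ffmpeg` on the `PATH` and otherwise falls back to the bundled `DEFAULT_FFMPEG_VERSION` build; a value listed in `INCLUDED_FFMPEG_VERSIONS` selects that bundled build; anything else is treated as an install prefix. A quick sketch of the mapping (constructing the model directly like this is illustrative only):

```python
from frigate.config import FfmpegConfig

# "5.0" is in INCLUDED_FFMPEG_VERSIONS, so it maps to the bundled build.
print(FfmpegConfig(path="5.0").ffmpeg_path)          # /usr/lib/ffmpeg/5.0/bin/ffmpeg
# Any other value is treated as a custom install prefix.
print(FfmpegConfig(path="/opt/ffmpeg").ffmpeg_path)  # /opt/ffmpeg/bin/ffmpeg
```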
class CameraRoleEnum(str, Enum):
    audio = "audio"
@@ -924,6 +957,14 @@ class CameraFfmpegConfig(FfmpegConfig):
        return v


class RetainConfig(FrigateBaseModel):
    default: float = Field(default=10, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
    objects: Dict[str, float] = Field(
        default_factory=dict, title="Object retention period."
    )


class SnapshotsConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Snapshots enabled.")
    clean_copy: bool = Field(
@@ -1151,7 +1192,7 @@ class CameraConfig(FrigateBaseModel):
            + ffmpeg_output_args
        )

        # if there arent any outputs enabled for this input
        # if there aren't any outputs enabled for this input
        if len(ffmpeg_output_args) == 0:
            return None

@@ -1187,9 +1228,9 @@ class CameraConfig(FrigateBaseModel):
        )

        cmd = (
            ["ffmpeg"]
            [self.ffmpeg.ffmpeg_path]
            + global_args
            + hwaccel_args
            + (hwaccel_args if "detect" in ffmpeg_input.roles else [])
            + input_args
            + ["-i", escape_special_characters(ffmpeg_input.path)]
            + ffmpeg_output_args
@@ -1278,10 +1319,19 @@ def verify_recording_retention(camera_config: CameraConfig) -> None:
    if (
        camera_config.record.retain.days != 0
        and rank_map[camera_config.record.retain.mode]
        > rank_map[camera_config.record.events.retain.mode]
        > rank_map[camera_config.record.alerts.retain.mode]
    ):
        logger.warning(
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and alert retention is configured for {camera_config.record.alerts.retain.mode}. The more restrictive retention policy will be applied."
        )

    if (
        camera_config.record.retain.days != 0
        and rank_map[camera_config.record.retain.mode]
        > rank_map[camera_config.record.detections.retain.mode]
    ):
        logger.warning(
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and detection retention is configured for {camera_config.record.detections.retain.mode}. The more restrictive retention policy will be applied."
        )
@@ -1429,7 +1479,7 @@ class FrigateConfig(FrigateBaseModel):
        default_factory=TimestampStyleConfig,
        title="Global timestamp style configuration.",
    )
    version: Optional[float] = Field(default=None, title="Current config version.")
    version: Optional[str] = Field(default=None, title="Current config version.")

    def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig:
        """Merge camera config with globals."""
@@ -1446,6 +1496,9 @@ class FrigateConfig(FrigateBaseModel):
        config.mqtt.user = config.mqtt.user.format(**FRIGATE_ENV_VARS)
        config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)

        # set notifications state
        config.notifications.enabled_in_config = config.notifications.enabled

        # GenAI substitution
        if config.genai.api_key:
            config.genai.api_key = config.genai.api_key.format(**FRIGATE_ENV_VARS)
@@ -1471,7 +1524,7 @@ class FrigateConfig(FrigateBaseModel):
            "live": ...,
            "objects": ...,
            "review": ...,
            "genai": {"enabled"},
            "genai": ...,
            "motion": ...,
            "detect": ...,
            "ffmpeg": ...,
@@ -1501,7 +1554,9 @@ class FrigateConfig(FrigateBaseModel):
        if need_detect_dimensions or need_record_fourcc:
            stream_info = {"width": 0, "height": 0, "fourcc": None}
            try:
                stream_info = stream_info_retriever.get_stream_info(input.path)
                stream_info = stream_info_retriever.get_stream_info(
                    config.ffmpeg, input.path
                )
            except Exception:
                logger.warn(
                    f"Error detecting stream parameters automatically for {input.path} Applying default values."
@@ -12,7 +12,7 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"

# Attribute & Object Consts
# Attribute & Object constants

ATTRIBUTE_LABEL_MAP = {
    "person": ["face", "amazon"],
@@ -31,7 +31,7 @@ LABEL_NMS_MAP = {
}
LABEL_NMS_DEFAULT = 0.4

# Audio Consts
# Audio constants

AUDIO_DURATION = 0.975
AUDIO_FORMAT = "s16le"
@@ -39,16 +39,19 @@ AUDIO_MAX_BIT_RANGE = 32768.0
AUDIO_SAMPLE_RATE = 16000
AUDIO_MIN_CONFIDENCE = 0.5

# DB Consts
# DB constants

MAX_WAL_SIZE = 10  # MB

# Ffmpeg Presets
# Ffmpeg constants

DEFAULT_FFMPEG_VERSION = "7.0"
INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"

# Regex Consts
# Regex constants

REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"
@@ -24,7 +24,6 @@ from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess  # Assuming this function is available

# Set up logging
logger = logging.getLogger(__name__)
@@ -83,11 +82,11 @@ class HailoDetector(DetectionApi):
        self.network_group_params = self.network_group.create_params()

        # Create input and output virtual stream parameters
        self.input_vstreams_params = InputVStreamParams.make(
        self.input_vstream_params = InputVStreamParams.make(
            self.network_group,
            format_type=self.hef.get_input_vstream_infos()[0].format.type,
        )
        self.output_vstreams_params = OutputVStreamParams.make(
        self.output_vstream_params = OutputVStreamParams.make(
            self.network_group, format_type=getattr(FormatType, output_type)
        )

@@ -146,24 +145,16 @@ class HailoDetector(DetectionApi):
            f"[detect_raw] Converted tensor_input to numpy array: shape {tensor_input.shape}"
        )

        # Preprocess the tensor input using Frigate's preprocess function
        processed_tensor = preprocess(
            tensor_input, (1, self.h8l_model_height, self.h8l_model_width, 3), np.uint8
        )
        input_data = tensor_input
        logger.debug(
            f"[detect_raw] Tensor data and shape after preprocessing: {processed_tensor} {processed_tensor.shape}"
        )

        input_data = processed_tensor
        logger.debug(
            f"[detect_raw] Input data for inference shape: {processed_tensor.shape}, dtype: {processed_tensor.dtype}"
            f"[detect_raw] Input data for inference shape: {tensor_input.shape}, dtype: {tensor_input.dtype}"
        )

        try:
            with InferVStreams(
                self.network_group,
                self.input_vstreams_params,
                self.output_vstreams_params,
                self.input_vstream_params,
                self.output_vstream_params,
            ) as infer_pipeline:
                input_dict = {}
                if isinstance(input_data, dict):
@@ -1,11 +1,15 @@
import logging
import os

import numpy as np
from pydantic import Field
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    ModelTypeEnum,
)

logger = logging.getLogger(__name__)

@@ -14,6 +18,7 @@ DETECTOR_KEY = "onnx"

class ONNXDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]
    device: str = Field(default="AUTO", title="Device Type")


class ONNXDetector(DetectionApi):
@@ -21,7 +26,7 @@ class ONNXDetector(DetectionApi):

    def __init__(self, detector_config: ONNXDetectorConfig):
        try:
            import onnxruntime
            import onnxruntime as ort

            logger.info("ONNX: loaded onnxruntime module")
        except ModuleNotFoundError:
@@ -32,16 +37,79 @@ class ONNXDetector(DetectionApi):

        path = detector_config.model.path
        logger.info(f"ONNX: loading {detector_config.model.path}")
        self.model = onnxruntime.InferenceSession(path)

        providers = (
            ["CPUExecutionProvider"]
            if detector_config.device == "CPU"
            else ort.get_available_providers()
        )
        options = []

        for provider in providers:
            if provider == "TensorrtExecutionProvider":
                os.makedirs(
                    "/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True
                )
                options.append(
                    {
                        "trt_timing_cache_enable": True,
                        "trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
                        "trt_engine_cache_enable": True,
                        "trt_dump_ep_context_model": True,
                        "trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
                        "trt_ep_context_file_path": "/config/model_cache/tensorrt/ort",
                    }
                )
            elif provider == "OpenVINOExecutionProvider":
                os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
                options.append(
                    {
                        "cache_dir": "/config/model_cache/openvino/ort",
                        "device_type": detector_config.device,
                    }
                )
            else:
                options.append({})

        self.model = ort.InferenceSession(
            path, providers=providers, provider_options=options
        )

        self.h = detector_config.model.height
        self.w = detector_config.model.width
        self.onnx_model_type = detector_config.model.model_type
        self.onnx_model_px = detector_config.model.input_pixel_format
        self.onnx_model_shape = detector_config.model.input_tensor
        path = detector_config.model.path

        logger.info(f"ONNX: {path} loaded")
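Which execution providers are actually offered depends on the installed onnxruntime build; the per-provider `options` list above is matched positionally to `providers`. A quick check, assuming onnxruntime is installed:

```python
import onnxruntime as ort

# CUDA/TensorRT/OpenVINO only appear here when the matching ort build is installed.
print(ort.get_available_providers())  # e.g. ['CPUExecutionProvider', ...]
```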
    def detect_raw(self, tensor_input):
        model_input_name = self.model.get_inputs()[0].name
        model_input_shape = self.model.get_inputs()[0].shape
        tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
        # ruff: noqa: F841
        tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]
        tensor_output = self.model.run(None, {model_input_name: tensor_input})

        raise Exception(
            "No models are currently supported via onnx. See the docs for more info."
        )
        if self.onnx_model_type == ModelTypeEnum.yolonas:
            predictions = tensor_output[0]

            detections = np.zeros((20, 6), np.float32)

            for i, prediction in enumerate(predictions):
                if i == 20:
                    break
                (_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
                # when running in GPU mode, empty predictions in the output have class_id of -1
                if class_id < 0:
                    break
                detections[i] = [
                    class_id,
                    confidence,
                    y_min / self.h,
                    x_min / self.w,
                    y_max / self.h,
                    x_max / self.w,
                ]
            return detections
        else:
            raise Exception(
                f"{self.onnx_model_type} is currently not supported for rocm. See the docs for more info on supported models."
            )
@@ -30,12 +30,6 @@ class OvDetector(DetectionApi):
        self.h = detector_config.model.height
        self.w = detector_config.model.width

        if detector_config.device == "AUTO":
            logger.warning(
                "OpenVINO AUTO device type is not currently supported. Attempting to use GPU instead."
            )
            detector_config.device = "GPU"

        if not os.path.isfile(detector_config.model.path):
            logger.error(f"OpenVino model file {detector_config.model.path} not found.")
            raise FileNotFoundError
@@ -129,10 +123,10 @@ class OvDetector(DetectionApi):

        strides = [8, 16, 32]

        hsizes = [self.h // stride for stride in strides]
        wsizes = [self.w // stride for stride in strides]
        hsize_list = [self.h // stride for stride in strides]
        wsize_list = [self.w // stride for stride in strides]

        for hsize, wsize, stride in zip(hsizes, wsizes, strides):
        for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
            xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
            grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
            grids.append(grid)
@@ -216,10 +210,12 @@ class OvDetector(DetectionApi):

        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
        dets = dets[conf_mask]
        detections = np.concatenate(
            (image_pred[:, :5], class_conf, class_pred), axis=1
        )
        detections = detections[conf_mask]

        ordered = dets[dets[:, 5].argsort()[::-1]][:20]
        ordered = detections[detections[:, 5].argsort()[::-1]][:20]

        for i, object_detected in enumerate(ordered):
            detections[i] = self.process_yolo(
@@ -17,7 +17,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]

supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}

model_chache_dir = "/config/model_cache/rknn_cache/"
model_cache_dir = "/config/model_cache/rknn_cache/"


class RknnDetectorConfig(BaseDetectorConfig):
@@ -110,7 +110,7 @@ class Rknn(DetectionApi):
        if model_matched:
            model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"

            model_props["path"] = model_chache_dir + model_props["filename"]
            model_props["path"] = model_cache_dir + model_props["filename"]

            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
@@ -125,12 +125,12 @@ class Rknn(DetectionApi):
        return model_props

    def download_model(self, filename):
        if not os.path.isdir(model_chache_dir):
            os.mkdir(model_chache_dir)
        if not os.path.isdir(model_cache_dir):
            os.mkdir(model_cache_dir)

        urllib.request.urlretrieve(
            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
            model_chache_dir + filename,
            model_cache_dir + filename,
        )

    def check_config(self, config):
@@ -9,8 +9,10 @@ from pydantic import Field
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
from frigate.detectors.util import preprocess
|
||||
from frigate.detectors.detector_config import (
|
||||
BaseDetectorConfig,
|
||||
ModelTypeEnum,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -74,7 +76,16 @@ class ROCmDetector(DetectionApi):
|
||||
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
|
||||
raise
|
||||
|
||||
if detector_config.conserve_cpu:
|
||||
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
|
||||
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
|
||||
|
||||
self.h = detector_config.model.height
|
||||
self.w = detector_config.model.width
|
||||
self.rocm_model_type = detector_config.model.model_type
|
||||
self.rocm_model_px = detector_config.model.input_pixel_format
|
||||
path = detector_config.model.path
|
||||
|
||||
mxr_path = os.path.splitext(path)[0] + ".mxr"
|
||||
if path.endswith(".mxr"):
|
||||
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
|
||||
@@ -84,6 +95,7 @@ class ROCmDetector(DetectionApi):
|
||||
self.model = migraphx.load(mxr_path)
|
||||
else:
|
||||
logger.info(f"AMD/ROCm: loading model from {path}")
|
||||
|
||||
if path.endswith(".onnx"):
|
||||
self.model = migraphx.parse_onnx(path)
|
||||
elif (
|
||||
@@ -95,30 +107,51 @@ class ROCmDetector(DetectionApi):
                self.model = migraphx.parse_tf(path)
            else:
                raise Exception(f"AMD/ROCm: unknown model format {path}")

            logger.info("AMD/ROCm: compiling the model")

            self.model.compile(
                migraphx.get_target("gpu"), offload_copy=True, fast_math=True
            )

            logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")

            os.makedirs("/config/model_cache/rocm", exist_ok=True)
            migraphx.save(self.model, mxr_path)

        logger.info("AMD/ROCm: model loaded")

    def detect_raw(self, tensor_input):
        model_input_name = self.model.get_parameter_names()[0]
        model_input_shape = tuple(
            self.model.get_parameter_shapes()[model_input_name].lens()
        )
        tensor_input = preprocess(tensor_input, model_input_shape, np.float32)

        detector_result = self.model.run({model_input_name: tensor_input})[0]

        addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
        # ruff: noqa: F841

        tensor_output = np.ctypeslib.as_array(
            addr, shape=detector_result.get_shape().lens()
        )

        raise Exception(
            "No models are currently supported for rocm. See the docs for more info."
        )
        if self.rocm_model_type == ModelTypeEnum.yolonas:
            predictions = tensor_output

            detections = np.zeros((20, 6), np.float32)

            for i, prediction in enumerate(predictions):
                if i == 20:
                    break
                (_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
                # when running in GPU mode, empty predictions in the output have class_id of -1
                if class_id < 0:
                    break
                detections[i] = [
                    class_id,
                    confidence,
                    y_min / self.h,
                    x_min / self.w,
                    y_max / self.h,
                    x_max / self.w,
                ]
            return detections
        else:
            raise Exception(
                f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
            )
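The yolonas branch above fills what appears to be Frigate's common detector output contract: a fixed (20, 6) float32 array whose rows are [class_id, confidence, y_min, x_min, y_max, x_max], with box coordinates normalized to the model's input size. A small sketch of the same packing with hypothetical raw predictions:

import numpy as np

h, w = 320, 320  # assumed model input size
# hypothetical yolonas-style rows: (batch_idx, x_min, y_min, x_max, y_max, confidence, class_id)
predictions = np.array([[0, 16, 32, 160, 320, 0.9, 1], [0, 0, 0, 0, 0, 0.0, -1]])

detections = np.zeros((20, 6), np.float32)
for i, (_, x_min, y_min, x_max, y_max, conf, class_id) in enumerate(predictions):
    if i == 20 or class_id < 0:  # -1 marks empty slots in the GPU output
        break
    detections[i] = [class_id, conf, y_min / h, x_min / w, y_max / h, x_max / w]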
@@ -285,14 +285,14 @@ class TensorRtDetector(DetectionApi):
            boxes, scores, classes
        """
        # filter low-conf detections and concatenate results of all yolo layers
        detections = []
        detection_list = []
        for o in trt_outputs:
            dets = o.reshape((-1, 7))
            dets = dets[dets[:, 4] * dets[:, 6] >= conf_th]
            detections.append(dets)
        detections = np.concatenate(detections, axis=0)
            detections = o.reshape((-1, 7))
            detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
            detection_list.append(detections)
        detection_list = np.concatenate(detection_list, axis=0)

        return detections
        return detection_list

    def detect_raw(self, tensor_input):
        # Input tensor has the shape of the [height, width, 3]
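The filter in the hunk above is the usual YOLO decoding step: the effective score of a row is objectness (column 4) times the class probability (column 6), and rows below the threshold are dropped before the per-layer results are concatenated. A minimal sketch with hypothetical data:

import numpy as np

conf_th = 0.4
# hypothetical (N, 7) layer output; columns 4 and 6 assumed to hold objectness and class prob
layer = np.array([[0, 0, 0, 0, 0.9, 2, 0.8], [0, 0, 0, 0, 0.5, 1, 0.3]])

kept = layer[layer[:, 4] * layer[:, 6] >= conf_th]  # 0.72 passes, 0.15 does not
print(kept.shape)  # (1, 7)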
@@ -1,36 +0,0 @@
import logging

import cv2
import numpy as np

logger = logging.getLogger(__name__)


def preprocess(tensor_input, model_input_shape, model_input_element_type):
    model_input_shape = tuple(model_input_shape)
    assert tensor_input.dtype == np.uint8, f"tensor_input.dtype: {tensor_input.dtype}"
    if len(tensor_input.shape) == 3:
        tensor_input = tensor_input[np.newaxis, :]
    if model_input_element_type == np.uint8:
        # nothing to do for uint8 model input
        assert (
            model_input_shape == tensor_input.shape
        ), f"model_input_shape: {model_input_shape}, tensor_input.shape: {tensor_input.shape}"
        return tensor_input
    assert (
        model_input_element_type == np.float32
    ), f"model_input_element_type: {model_input_element_type}"
    # tensor_input must be nhwc
    assert tensor_input.shape[3] == 3, f"tensor_input.shape: {tensor_input.shape}"
    if tensor_input.shape[1:3] != model_input_shape[2:4]:
        logger.warn(
            f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
        )
    # cv2.dnn.blobFromImage is faster than numpying it
    return cv2.dnn.blobFromImage(
        tensor_input[0],
        1.0 / 255,
        (model_input_shape[3], model_input_shape[2]),
        None,
        swapRB=False,
    )
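For reference, the cv2.dnn.blobFromImage call in the removed helper is roughly equivalent to the following pure-numpy transform (resizing aside): scale to [0, 1], reorder HWC to CHW, and add a batch dimension. A sketch, assuming the frame is already at the model's resolution:

import numpy as np

frame = np.zeros((320, 320, 3), dtype=np.uint8)  # hypothetical HWC frame

blob = frame.astype(np.float32) / 255.0        # scalefactor = 1.0 / 255
blob = np.transpose(blob, (2, 0, 1))[None, :]  # HWC -> CHW, then add batch dim
print(blob.shape)  # (1, 3, 320, 320), matching blobFromImage's NCHW layout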
@@ -15,7 +15,7 @@ from frigate.models import Event
# Squelch posthog logging
logging.getLogger("chromadb.telemetry.product.posthog").setLevel(logging.CRITICAL)

# Hotsawp the sqlite3 module for Chroma compatibility
# Hot-swap the sqlite3 module for Chroma compatibility
try:
    from chromadb import Collection
    from chromadb import HttpClient as ChromaClient
@@ -85,7 +85,10 @@ class Embeddings:
    @property
    def description(self) -> Collection:
        return self.client.get_or_create_collection(
            name="event_description", embedding_function=MiniLMEmbedding()
            name="event_description",
            embedding_function=MiniLMEmbedding(
                preferred_providers=["CPUExecutionProvider"]
            ),
        )

    def reindex(self) -> None:
@@ -1,9 +1,13 @@
"""CLIP Embeddings for Frigate."""

import errno
import logging
import os
from pathlib import Path
from typing import Tuple, Union

import onnxruntime as ort
import requests
from chromadb import EmbeddingFunction, Embeddings
from chromadb.api.types import (
    Documents,
@@ -39,10 +43,49 @@ class Clip(OnnxClip):
        models = []
        for model_file in [IMAGE_MODEL_FILE, TEXT_MODEL_FILE]:
            path = os.path.join(MODEL_CACHE_DIR, "clip", model_file)
            models.append(OnnxClip._load_model(path, silent))
            models.append(Clip._load_model(path, silent))

        return models[0], models[1]

    @staticmethod
    def _load_model(path: str, silent: bool):
        providers = ["CPUExecutionProvider"]

        try:
            if os.path.exists(path):
                return ort.InferenceSession(path, providers=providers)
            else:
                raise FileNotFoundError(
                    errno.ENOENT,
                    os.strerror(errno.ENOENT),
                    path,
                )
        except Exception:
            s3_url = f"https://lakera-clip.s3.eu-west-1.amazonaws.com/{os.path.basename(path)}"
            if not silent:
                logging.info(
                    f"The model file ({path}) doesn't exist "
                    f"or it is invalid. Downloading it from the public S3 "
                    f"bucket: {s3_url}."  # noqa: E501
                )

            # Download from S3
            # Saving to a temporary file first to avoid corrupting the file
            temporary_filename = Path(path).with_name(os.path.basename(path) + ".part")

            # Create any missing directories in the path
            temporary_filename.parent.mkdir(parents=True, exist_ok=True)

            with requests.get(s3_url, stream=True) as r:
                r.raise_for_status()
                with open(temporary_filename, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
                    f.flush()
            # Finally move the temporary file to the correct location
            temporary_filename.rename(path)
            return ort.InferenceSession(path, providers=providers)


class ClipEmbedding(EmbeddingFunction):
    """Embedding function for CLIP model used in Chroma."""
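The _load_model override above uses a pattern worth calling out: stream the download into a .part file and rename it into place only once the transfer completes, so an interrupted download never leaves a truncated file at the final path. A generic sketch of the pattern, with a hypothetical URL and destination:

from pathlib import Path

import requests

def download_atomically(url: str, dest: Path) -> None:
    part = dest.with_name(dest.name + ".part")
    part.parent.mkdir(parents=True, exist_ok=True)
    with requests.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(part, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    part.rename(dest)  # rename is atomic on the same filesystem

download_atomically("https://example.com/model.onnx", Path("/tmp/model.onnx"))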
@@ -78,9 +78,11 @@ class EmbeddingMaintainer(threading.Thread):
        try:
            frame_id = f"{camera}{data['frame_time']}"
            yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
            data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
            self.tracked_events[data["id"]].append(data)
            self.frame_manager.close(frame_id)

            if yuv_frame is not None:
                data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
                self.tracked_events[data["id"]].append(data)
                self.frame_manager.close(frame_id)
        except FileNotFoundError:
            pass

@@ -169,8 +171,11 @@ class EmbeddingMaintainer(threading.Thread):
        self, event: Event, thumbnails: list[bytes], metadata: dict
    ) -> None:
        """Embed the description for an event."""
        camera_config = self.config.cameras[event.camera]

        description = self.genai_client.generate_description(thumbnails, metadata)
        description = self.genai_client.generate_description(
            camera_config, thumbnails, metadata
        )

        if description is None:
            logger.debug("Failed to generate description for %s", event.id)
@@ -50,7 +50,7 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
        or get_ffmpeg_arg_list(ffmpeg.input_args)
    )
    return (
        ["ffmpeg", "-vn", "-threads", "1"]
        [ffmpeg.ffmpeg_path, "-vn", "-threads", "1"]
        + input_args
        + ["-i"]
        + [ffmpeg_input.path]
@@ -288,10 +288,7 @@ class AudioEventMaintainer(threading.Thread):

            resp = requests.put(
                f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
                json={
                    "end_time": detection["last_detection"]
                    + self.config.record.events.post_capture
                },
                json={"end_time": detection["last_detection"]},
            )

            if resp.status_code == 200:
@@ -68,7 +68,10 @@ class EventCleanup(threading.Thread):
    def expire(self, media_type: EventCleanupType) -> list[str]:
        ## Expire events from unlisted cameras based on the global config
        if media_type == EventCleanupType.clips:
            retain_config = self.config.record.events.retain
            expire_days = max(
                self.config.record.alerts.retain.days,
                self.config.record.detections.retain.days,
            )
            file_extension = None  # mp4 clips are no longer stored in /clips
            update_params = {"has_clip": False}
        else:
@@ -82,7 +85,11 @@ class EventCleanup(threading.Thread):
        # loop over object types in db
        for event in distinct_labels:
            # get expiration time for this label
            expire_days = retain_config.objects.get(event.label, retain_config.default)
            if media_type == EventCleanupType.snapshots:
                expire_days = retain_config.objects.get(
                    event.label, retain_config.default
                )

            expire_after = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
@@ -132,7 +139,10 @@ class EventCleanup(threading.Thread):
        ## Expire events from cameras based on the camera config
        for name, camera in self.config.cameras.items():
            if media_type == EventCleanupType.clips:
                retain_config = camera.record.events.retain
                expire_days = max(
                    camera.record.alerts.retain.days,
                    camera.record.detections.retain.days,
                )
            else:
                retain_config = camera.snapshots.retain

@@ -142,9 +152,11 @@ class EventCleanup(threading.Thread):
            # loop over object types in db
            for event in distinct_labels:
                # get expiration time for this label
                expire_days = retain_config.objects.get(
                    event.label, retain_config.default
                )
                if media_type == EventCleanupType.snapshots:
                    expire_days = retain_config.objects.get(
                        event.label, retain_config.default
                    )

                expire_after = (
                    datetime.datetime.now() - datetime.timedelta(days=expire_days)
                ).timestamp()
@@ -57,11 +57,7 @@ class ExternalEventProcessor:
        thumbnail = self._write_images(
            camera_config, label, event_id, draw, snapshot_frame
        )
        end = (
            now + duration + camera_config.record.events.post_capture
            if duration is not None
            else None
        )
        end = now + duration if duration is not None else None

        self.event_sender.publish(
            (
@@ -74,7 +70,7 @@ class ExternalEventProcessor:
                    "sub_label": sub_label,
                    "score": score,
                    "camera": camera,
                    "start_time": now - camera_config.record.events.pre_capture,
                    "start_time": now,
                    "end_time": end,
                    "thumbnail": thumbnail,
                    "has_clip": camera_config.record.enabled and include_recording,
@@ -5,7 +5,7 @@ from multiprocessing.synchronize import Event as MpEvent
from typing import Dict

from frigate.comms.events_updater import EventEndPublisher, EventUpdateSubscriber
from frigate.config import EventsConfig, FrigateConfig
from frigate.config import FrigateConfig
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.models import Event
from frigate.util.builtin import to_relative_box
@@ -128,16 +128,13 @@ class EventProcessor(threading.Thread):
        if should_update_db(self.events_in_process[event_data["id"]], event_data):
            updated_db = True
            camera_config = self.config.cameras[camera]
            event_config: EventsConfig = camera_config.record.events
            width = camera_config.detect.width
            height = camera_config.detect.height
            first_detector = list(self.config.detectors.values())[0]

            start_time = event_data["start_time"] - event_config.pre_capture
            start_time = event_data["start_time"]
            end_time = (
                None
                if event_data["end_time"] is None
                else event_data["end_time"] + event_config.post_capture
                None if event_data["end_time"] is None else event_data["end_time"]
            )
            # score of the snapshot
            score = (
@@ -5,7 +5,11 @@ import os
from enum import Enum
from typing import Any

from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI
from frigate.const import (
    FFMPEG_HWACCEL_NVIDIA,
    FFMPEG_HWACCEL_VAAPI,
    FFMPEG_HWACCEL_VULKAN,
)
from frigate.util.services import vainfo_hwaccel
from frigate.version import VERSION

@@ -45,12 +49,12 @@ class LibvaGpuSelector:

FPS_VFR_PARAM = (
    "-fps_mode vfr"
    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") >= 59
    else "-vsync 2"
)
TIMEOUT_PARAM = (
    "-timeout"
    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") >= 59
    else "-stimeout"
)
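The added `or "59"` guards a subtle gap in os.getenv: the default only applies when the variable is entirely unset, not when it is set to an empty string, and int("") raises ValueError. A minimal sketch of the difference:

import os

# simulate the variable being present but empty
os.environ["LIBAVFORMAT_VERSION_MAJOR"] = ""

value = os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")          # -> "" (default not used)
value = os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59"  # -> "59"
print(int(value))  # 59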
@@ -71,6 +75,8 @@ PRESETS_HW_ACCEL_DECODE = {
    "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
    "preset-rk-h264": "-hwaccel rkmpp -hwaccel_output_format drm_prime",
    "preset-rk-h265": "-hwaccel rkmpp -hwaccel_output_format drm_prime",
    # experimental presets
    FFMPEG_HWACCEL_VULKAN: "-hwaccel vulkan -init_hw_device vulkan=gpu:0 -filter_hw_device gpu -hwaccel_output_format vulkan",
}
PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[
    FFMPEG_HWACCEL_NVIDIA
@@ -85,15 +91,17 @@ PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
PRESETS_HW_ACCEL_SCALE = {
    "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
    "preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
    "preset-jetson-h264": "-r {0}",  # scaled in decoder
    "preset-jetson-h265": "-r {0}",  # scaled in decoder
    "preset-rk-h264": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
    "preset-rk-h265": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
    "default": "-r {0} -vf fps={0},scale={1}:{2}",
    # experimental presets
    FFMPEG_HWACCEL_VULKAN: "-r {0} -vf fps={0},hwupload,scale_vulkan=w={1}:h={2},hwdownload",
}
PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[
    FFMPEG_HWACCEL_NVIDIA
@@ -103,17 +111,17 @@ PRESETS_HW_ACCEL_SCALE["preset-nvidia-h265"] = PRESETS_HW_ACCEL_SCALE[
]

PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
    "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
    "preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m {1}",
    FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
    "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
    "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
    "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
    "preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp -profile:v high {1}",
    "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp -profile:v high {1}",
    "default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
    "preset-rpi-64-h264": "{0} -hide_banner {1} -c:v h264_v4l2m2m {2}",
    "preset-rpi-64-h265": "{0} -hide_banner {1} -c:v hevc_v4l2m2m {2}",
    FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
    "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
    "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
    "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
    "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
    "preset-rk-h264": "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
    "preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v high {2}",
    "default": "{0} -hide_banner {1} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {2}",
}
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"] = (
    PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
@@ -123,18 +131,18 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h265"] = (
)

PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
    "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}",
    "preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m -pix_fmt yuv420p {1}",
    FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
    "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v h264_nvenc {1}",
    "preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}",
    "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
    "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}",
    "preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp -profile:v high {1}",
    "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp -profile:v high {1}",
    "default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}",
    "preset-rpi-64-h264": "{0} -hide_banner {1} -c:v h264_v4l2m2m -pix_fmt yuv420p {2}",
    "preset-rpi-64-h265": "{0} -hide_banner {1} -c:v hevc_v4l2m2m -pix_fmt yuv420p {2}",
    FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi {2}",
    "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
    "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {1} -c:v h264_nvenc {2}",
    "preset-nvidia-h265": "{0} -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {1} -c:v hevc_nvenc {2}",
    "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
    "preset-jetson-h265": "{0} -hide_banner {1} -c:v hevc_nvmpi -profile high {2}",
    "preset-rk-h264": "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
    "preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v high {2}",
    "default": "{0} -hide_banner {1} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {2}",
}
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = (
    PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[FFMPEG_HWACCEL_NVIDIA]
@@ -142,7 +150,7 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = (

# encoding of previews is only done on CPU due to comparable encode times and better quality from libx264
PRESETS_HW_ACCEL_ENCODE_PREVIEW = {
    "default": "ffmpeg -hide_banner {0} -c:v libx264 -profile:v baseline -preset:v ultrafast {1}",
    "default": "{0} -hide_banner {1} -c:v libx264 -profile:v baseline -preset:v ultrafast {2}",
}

@@ -177,6 +185,15 @@ def parse_preset_hardware_acceleration_scale(
    else:
        scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])

        if (
            ",hwdownload,format=nv12,eq=gamma=1.05" in scale
            and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
        ):
            scale.replace(
                ",hwdownload,format=nv12,eq=gamma=1.05",
                ":format=nv12,hwdownload,format=nv12,format=yuv420p",
            )

    scale = scale.format(fps, width, height).split(" ")
    scale.extend(detect_args)
    return scale
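One Python detail to watch in the hunk above: str.replace returns a new string rather than mutating in place, so the substitution only takes effect if its result is assigned back. A minimal sketch of the reassigning form (an illustration of the language semantics, not a claim about the commit's intent):

scale = "-r 5 -vf fps=5,scale_vaapi=w=1280:h=720,hwdownload,format=nv12,eq=gamma=1.05"

# str.replace is pure; without the assignment the original string is unchanged
scale = scale.replace(
    ",hwdownload,format=nv12,eq=gamma=1.05",
    ":format=nv12,hwdownload,format=nv12,format=yuv420p",
)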
@@ -189,7 +206,11 @@ class EncodeTypeEnum(str, Enum):


def parse_preset_hardware_acceleration_encode(
    arg: Any, input: str, output: str, type: EncodeTypeEnum = EncodeTypeEnum.birdseye
    ffmpeg_path: str,
    arg: Any,
    input: str,
    output: str,
    type: EncodeTypeEnum = EncodeTypeEnum.birdseye,
) -> str:
    """Return the correct scaling preset or default preset if none is set."""
    if type == EncodeTypeEnum.birdseye:
@@ -207,6 +228,7 @@ def parse_preset_hardware_acceleration_encode(
        arg = "default"

    return arg_map.get(arg, arg_map["default"]).format(
        ffmpeg_path,
        input,
        output,
        _gpu_selector.get_selected_gpu(),
@@ -4,7 +4,7 @@ import importlib
import os
from typing import Optional

from frigate.config import GenAIConfig, GenAIProviderEnum
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum

PROVIDERS = {}

@@ -28,11 +28,14 @@ class GenAIClient:
        self.provider = self._init_provider()

    def generate_description(
        self, thumbnails: list[bytes], metadata: dict[str, any]
        self,
        camera_config: CameraConfig,
        thumbnails: list[bytes],
        metadata: dict[str, any],
    ) -> Optional[str]:
        """Generate a description for the frame."""
        prompt = self.genai_config.object_prompts.get(
            metadata["label"], self.genai_config.prompt
        prompt = camera_config.genai.object_prompts.get(
            metadata["label"], camera_config.genai.prompt
        ).format(**metadata)
        return self._send(prompt, thumbnails)
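The prompt lookup above falls back from a per-label prompt to the camera's default prompt, then interpolates event metadata with str.format(**metadata). A small sketch of that resolution order, with hypothetical stand-ins for the camera's genai config:

# hypothetical per-camera genai settings and event metadata
object_prompts = {"person": "Describe the person near the {zone}."}
default_prompt = "Describe the {label} in these frames."
metadata = {"label": "car", "zone": "driveway"}

# per-label prompt if configured, otherwise the camera-wide default,
# with metadata fields interpolated via str.format(**metadata)
prompt = object_prompts.get(metadata["label"], default_prompt).format(**metadata)
print(prompt)  # Describe the car in these frames.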
@@ -1,71 +1,71 @@
# adapted from https://medium.com/@jonathonbao/python3-logging-with-multiprocessing-f51f460b8778
import atexit
import logging
import multiprocessing as mp
import os
import queue
import signal
import threading
from collections import deque
from logging import handlers
from multiprocessing import Queue
from types import FrameType
from contextlib import AbstractContextManager, ContextDecorator
from logging.handlers import QueueHandler, QueueListener
from types import TracebackType
from typing import Deque, Optional

from setproctitle import setproctitle
from typing_extensions import Self

from frigate.util.builtin import clean_camera_user_pass


def listener_configurer() -> None:
    root = logging.getLogger()

    if root.hasHandlers():
        root.handlers.clear()

    console_handler = logging.StreamHandler()
    formatter = logging.Formatter(
        "[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s", "%Y-%m-%d %H:%M:%S"
LOG_HANDLER = logging.StreamHandler()
LOG_HANDLER.setFormatter(
    logging.Formatter(
        "[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )
    console_handler.setFormatter(formatter)
    root.addHandler(console_handler)
    root.setLevel(logging.INFO)
)

LOG_HANDLER.addFilter(
    lambda record: not record.getMessage().startswith(
        "You are using a scalar distance function"
    )
)


def root_configurer(queue: Queue) -> None:
    h = handlers.QueueHandler(queue)
    root = logging.getLogger()
class log_thread(AbstractContextManager, ContextDecorator):
    def __init__(self, *, handler: logging.Handler = LOG_HANDLER):
        super().__init__()

    if root.hasHandlers():
        root.handlers.clear()
        self._handler = handler

    root.addHandler(h)
    root.setLevel(logging.INFO)
        log_queue: mp.Queue = mp.Queue()
        self._queue_handler = QueueHandler(log_queue)

        self._log_listener = QueueListener(
            log_queue, self._handler, respect_handler_level=True
        )

def log_process(log_queue: Queue) -> None:
    threading.current_thread().name = "logger"
    setproctitle("frigate.logger")
    listener_configurer()
    @property
    def handler(self) -> logging.Handler:
        return self._handler

    stop_event = mp.Event()
    def _stop_thread(self) -> None:
        self._log_listener.stop()

    def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
        stop_event.set()
    def __enter__(self) -> Self:
        logging.getLogger().addHandler(self._queue_handler)

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)
        atexit.register(self._stop_thread)
        self._log_listener.start()

    while True:
        try:
            record = log_queue.get(block=True, timeout=1.0)
        except queue.Empty:
            if stop_event.is_set():
                break
            continue
        if record.msg.startswith("You are using a scalar distance function"):
            continue
        logger = logging.getLogger(record.name)
        logger.handle(record)
        return self

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_info: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        logging.getLogger().removeHandler(self._queue_handler)

        atexit.unregister(self._stop_thread)
        self._stop_thread()


# based on https://codereview.stackexchange.com/a/17959
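The rewrite above replaces a hand-rolled log-forwarding loop with the standard library's QueueHandler/QueueListener pair: producers push LogRecords onto a queue, and a background listener thread forwards them to the real handler. A self-contained sketch of that pattern, kept to a single process for brevity:

import logging
import multiprocessing as mp
from logging.handlers import QueueHandler, QueueListener

log_queue: mp.Queue = mp.Queue()
console = logging.StreamHandler()
console.setFormatter(logging.Formatter("[%(asctime)s] %(name)s: %(message)s"))

listener = QueueListener(log_queue, console, respect_handler_level=True)
listener.start()

root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(QueueHandler(log_queue))  # producers only ever touch the queue

root.info("hello from the queue")
listener.stop()  # flushes remaining records before shutdown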
@@ -55,13 +55,13 @@ class FrigateMotionDetector(MotionDetector):

        # Improve contrast
        if self.improve_contrast.value:
            minval = np.percentile(resized_frame, 4)
            maxval = np.percentile(resized_frame, 96)
            min_value = np.percentile(resized_frame, 4)
            max_value = np.percentile(resized_frame, 96)
            # don't adjust if the image is a single color
            if minval < maxval:
                resized_frame = np.clip(resized_frame, minval, maxval)
            if min_value < max_value:
                resized_frame = np.clip(resized_frame, min_value, max_value)
                resized_frame = (
                    ((resized_frame - minval) / (maxval - minval)) * 255
                    ((resized_frame - min_value) / (max_value - min_value)) * 255
                ).astype(np.uint8)

        # mask frame
@@ -100,13 +100,13 @@ class FrigateMotionDetector(MotionDetector):
        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh_dilated = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
        contours = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        cnts = imutils.grab_contours(cnts)
        contours = imutils.grab_contours(contours)

        # loop over the contours
        for c in cnts:
        for c in contours:
            # if the contour is big enough, count it as motion
            contour_area = cv2.contourArea(c)
            if contour_area > self.contour_area.value:
@@ -124,7 +124,7 @@ class FrigateMotionDetector(MotionDetector):
            thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
            # print("--------")
            # print(self.frame_counter)
            for c in cnts:
            for c in contours:
                contour_area = cv2.contourArea(c)
                if contour_area > self.contour_area.value:
                    x, y, w, h = cv2.boundingRect(c)
@@ -79,12 +79,15 @@ class ImprovedMotionDetector(MotionDetector):
        # Improve contrast
        if self.config.improve_contrast:
            # TODO tracking moving average of min/max to avoid sudden contrast changes
            minval = np.percentile(resized_frame, 4).astype(np.uint8)
            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
            min_value = np.percentile(resized_frame, 4).astype(np.uint8)
            max_value = np.percentile(resized_frame, 96).astype(np.uint8)
            # skip contrast calcs if the image is a single color
            if minval < maxval:
            if min_value < max_value:
                # keep track of the last 50 contrast values
                self.contrast_values[self.contrast_values_index] = [minval, maxval]
                self.contrast_values[self.contrast_values_index] = [
                    min_value,
                    max_value,
                ]
                self.contrast_values_index += 1
                if self.contrast_values_index == len(self.contrast_values):
                    self.contrast_values_index = 0
@@ -122,14 +125,14 @@ class ImprovedMotionDetector(MotionDetector):
        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh_dilated = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(
        contours = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        cnts = imutils.grab_contours(cnts)
        contours = imutils.grab_contours(contours)

        # loop over the contours
        total_contour_area = 0
        for c in cnts:
        for c in contours:
            # if the contour is big enough, count it as motion
            contour_area = cv2.contourArea(c)
            total_contour_area += contour_area
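The contrast step these renames touch is a percentile stretch: clip the frame to its 4th–96th percentile range, then rescale that range to the full 0–255 span. A minimal numpy sketch of the same operation on a hypothetical grayscale frame:

import numpy as np

frame = np.random.randint(0, 255, (120, 160), dtype=np.uint8)  # hypothetical gray frame

min_value = np.percentile(frame, 4)
max_value = np.percentile(frame, 96)
if min_value < max_value:  # skip single-color frames to avoid divide-by-zero
    stretched = np.clip(frame, min_value, max_value)
    stretched = (
        ((stretched - min_value) / (max_value - min_value)) * 255
    ).astype(np.uint8)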
@@ -118,12 +118,14 @@ def run_detector(
        )

        if input_frame is None:
            logger.warning(f"Failed to get frame {connection_id} from SHM")
            continue

        # detect and send the output
        start.value = datetime.datetime.now().timestamp()
        detections = object_detector.detect_raw(input_frame)
        duration = datetime.datetime.now().timestamp() - start.value
        frame_manager.close(connection_id)
        outputs[connection_id]["np"][:] = detections[:]
        out_events[connection_id].set()
        start.value = 0.0
@@ -132,6 +132,7 @@ class TrackedObject:
        self.last_updated = 0
        self.last_published = 0
        self.frame = None
        self.active = True
        self.previous = self.to_dict()

    def _is_false_positive(self):
@@ -146,7 +147,7 @@ class TrackedObject:
        """get median of scores for object."""
        return median(self.score_history)

    def update(self, current_frame_time, obj_data):
    def update(self, current_frame_time: float, obj_data, has_valid_frame: bool):
        thumb_update = False
        significant_change = False
        autotracker_update = False
@@ -165,8 +166,9 @@ class TrackedObject:
            if self.computed_score > self.top_score:
                self.top_score = self.computed_score
            self.false_positive = self._is_false_positive()
            self.active = self.is_active()

        if not self.false_positive:
        if not self.false_positive and has_valid_frame:
            # determine if this frame is a better thumbnail
            if self.thumbnail_data is None or is_better_thumbnail(
                self.obj_data["label"],
@@ -249,11 +251,8 @@ class TrackedObject:
        if self.obj_data["attributes"] != obj_data["attributes"]:
            significant_change = True

        # if the motionless_count reaches the stationary threshold
        if (
            self.obj_data["motionless_count"]
            == self.camera_config.detect.stationary.threshold
        ):
        # if the state changed between stationary and active
        if self.previous["active"] != self.active:
            significant_change = True

        # update at least once per minute
@@ -285,8 +284,8 @@ class TrackedObject:
            "area": self.obj_data["area"],
            "ratio": self.obj_data["ratio"],
            "region": self.obj_data["region"],
            "stationary": self.obj_data["motionless_count"]
            > self.camera_config.detect.stationary.threshold,
            "active": self.active,
            "stationary": not self.active,
            "motionless_count": self.obj_data["motionless_count"],
            "position_changes": self.obj_data["position_changes"],
            "current_zones": self.current_zones.copy(),
@@ -302,6 +301,15 @@ class TrackedObject:

        return event

    def is_active(self):
        return not self.is_stationary()

    def is_stationary(self):
        return (
            self.obj_data["motionless_count"]
            > self.camera_config.detect.stationary.threshold
        )

    def get_thumbnail(self):
        if (
            self.thumbnail_data is None
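The new is_active/is_stationary helpers centralize a rule that was previously duplicated inline: an object counts as stationary once its motionless_count exceeds the camera's stationary threshold. A small sketch of the rule with a hypothetical threshold value:

stationary_threshold = 50  # frames; hypothetical camera config value

def is_stationary(motionless_count: int) -> bool:
    return motionless_count > stationary_threshold

def is_active(motionless_count: int) -> bool:
    return not is_stationary(motionless_count)

print(is_active(10), is_active(51))  # True False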
@@ -476,6 +484,7 @@ class CameraState:
        self.frame_manager = frame_manager
        self.best_objects: dict[str, TrackedObject] = {}
        self.object_counts = defaultdict(int)
        self.active_object_counts = defaultdict(int)
        self.tracked_objects: dict[str, TrackedObject] = {}
        self.frame_cache = {}
        self.zone_objects = defaultdict(list)
@@ -659,10 +668,14 @@ class CameraState:
    def update(self, frame_time, current_detections, motion_boxes, regions):
        # get the new frame
        frame_id = f"{self.name}{frame_time}"

        current_frame = self.frame_manager.get(
            frame_id, self.camera_config.frame_shape_yuv
        )

        if current_frame is None:
            logger.debug(f"Failed to get frame {frame_id} from SHM")

        tracked_objects = self.tracked_objects.copy()
        current_ids = set(current_detections.keys())
        previous_ids = set(tracked_objects.keys())
@@ -686,14 +699,14 @@ class CameraState:
        for id in updated_ids:
            updated_obj = tracked_objects[id]
            thumb_update, significant_update, autotracker_update = updated_obj.update(
                frame_time, current_detections[id]
                frame_time, current_detections[id], current_frame is not None
            )

            if autotracker_update or significant_update:
                for c in self.callbacks["autotrack"]:
                    c(self.name, updated_obj, frame_time)

            if thumb_update:
            if thumb_update and current_frame is not None:
                # ensure this frame is stored in the cache
                if (
                    updated_obj.thumbnail_data["frame_time"] == frame_time
@@ -732,10 +745,7 @@ class CameraState:

        for obj in tracked_objects.values():
            object_type = obj.obj_data["label"]
            active = (
                obj.obj_data["motionless_count"]
                < self.camera_config.detect.stationary.threshold
            )
            active = obj.is_active()

            if not obj.false_positive:
                label = object_type
@@ -760,9 +770,15 @@ class CameraState:
                }
            )

            # if the object's thumbnail is not from the current frame
            if obj.false_positive or obj.thumbnail_data["frame_time"] != frame_time:
            # if we don't have access to the current frame or
            # if the object's thumbnail is not from the current frame, skip
            if (
                current_frame is None
                or obj.false_positive
                or obj.thumbnail_data["frame_time"] != frame_time
            ):
                continue

            if object_type in self.best_objects:
                current_best = self.best_objects[object_type]
                now = datetime.datetime.now().timestamp()
@@ -796,10 +812,17 @@ class CameraState:
            if not obj.false_positive
        )

        active_obj_counter = Counter(
            obj.obj_data["label"]
            for obj in tracked_objects.values()
            if not obj.false_positive and obj.active
        )

        # keep track of all labels detected for this camera
        total_label_count = 0
        total_active_label_count = 0

        # report on detected objects
        # report on all detected objects
        for obj_name, count in obj_counter.items():
            total_label_count += count
@@ -808,12 +831,30 @@ class CameraState:
            for c in self.callbacks["object_status"]:
                c(self.name, obj_name, count)

        # update the active count on all detected objects
        # To ensure we emit 0's if all objects are stationary, we need to loop
        # over the set of all objects, not just active ones.
        for obj_name in set(obj_counter):
            count = active_obj_counter[obj_name]
            total_active_label_count += count

            if count != self.active_object_counts[obj_name]:
                self.active_object_counts[obj_name] = count
                for c in self.callbacks["active_object_status"]:
                    c(self.name, obj_name, count)

        # publish for all labels detected for this camera
        if total_label_count != self.object_counts.get("all"):
            self.object_counts["all"] = total_label_count
            for c in self.callbacks["object_status"]:
                c(self.name, "all", total_label_count)

        # publish active label counts for this camera
        if total_active_label_count != self.active_object_counts.get("all"):
            self.active_object_counts["all"] = total_active_label_count
            for c in self.callbacks["active_object_status"]:
                c(self.name, "all", total_active_label_count)

        # expire any objects that are >0 and no longer detected
        expired_objects = [
            obj_name
@@ -828,6 +869,11 @@ class CameraState:
            self.object_counts[obj_name] = 0
            for c in self.callbacks["object_status"]:
                c(self.name, obj_name, 0)
            # Only publish if the object was previously active.
            if self.active_object_counts[obj_name] > 0:
                for c in self.callbacks["active_object_status"]:
                    c(self.name, obj_name, 0)
                self.active_object_counts[obj_name] = 0
            for c in self.callbacks["snapshot"]:
                c(self.name, self.best_objects[obj_name], frame_time)
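The counting logic above publishes only on change: a counter value is emitted to the callbacks when it differs from the last published value, which keeps the downstream MQTT-style topics quiet when nothing moves. A minimal sketch of that pattern with a hypothetical publish function:

from collections import defaultdict

published = defaultdict(int)  # last value sent per label

def publish(label: str, count: int) -> None:  # hypothetical dispatcher stand-in
    print(f"camera/{label}: {count}")

def report(label: str, count: int) -> None:
    if count != published[label]:  # only emit on change
        published[label] = count
        publish(label, count)

report("person", 2)  # emits
report("person", 2)  # silent
report("person", 0)  # emits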
@@ -835,7 +881,7 @@ class CameraState:
        current_thumb_frames = {
            obj.thumbnail_data["frame_time"]
            for obj in tracked_objects.values()
            if not obj.false_positive
            if not obj.false_positive and obj.thumbnail_data is not None
        }
        current_best_frames = {
            obj.thumbnail_data["frame_time"] for obj in self.best_objects.values()
@@ -850,12 +896,16 @@ class CameraState:

        with self.current_frame_lock:
            self.tracked_objects = tracked_objects
            self.current_frame_time = frame_time
            self.motion_boxes = motion_boxes
            self.regions = regions
            self._current_frame = current_frame
            if self.previous_frame_id is not None:
                self.frame_manager.close(self.previous_frame_id)

            if current_frame is not None:
                self.current_frame_time = frame_time
                self._current_frame = current_frame

                if self.previous_frame_id is not None:
                    self.frame_manager.close(self.previous_frame_id)

                self.previous_frame_id = frame_id
@@ -886,6 +936,17 @@ class TrackedObjectProcessor(threading.Thread):

        self.camera_activity: dict[str, dict[str, any]] = {}

        # {
        #   'zone_name': {
        #       'person': {
        #           'camera_1': 2,
        #           'camera_2': 1
        #       }
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(dict))
        self.active_zone_data = defaultdict(lambda: defaultdict(dict))

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_sender.publish(
                (
@@ -1003,6 +1064,11 @@ class TrackedObjectProcessor(threading.Thread):
        def object_status(camera, object_name, status):
            self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)

        def active_object_status(camera, object_name, status):
            self.dispatcher.publish(
                f"{camera}/{object_name}/active", status, retain=False
            )

        def camera_activity(camera, activity):
            last_activity = self.camera_activity.get(camera)

@@ -1020,19 +1086,10 @@ class TrackedObjectProcessor(threading.Thread):
            camera_state.on("end", end)
            camera_state.on("snapshot", snapshot)
            camera_state.on("object_status", object_status)
            camera_state.on("active_object_status", active_object_status)
            camera_state.on("camera_activity", camera_activity)
            self.camera_states[camera] = camera_state

        # {
        #   'zone_name': {
        #       'person': {
        #           'camera_1': 2,
        #           'camera_2': 1
        #       }
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(dict))

    def should_save_snapshot(self, camera, obj: TrackedObject):
        if obj.false_positive:
            return False
@@ -1070,25 +1127,29 @@ class TrackedObjectProcessor(threading.Thread):
        if obj.obj_data["position_changes"] == 0:
            return False

        # If there are required zones and there is no overlap
        # If the object is not considered an alert or detection
        review_config = self.config.cameras[camera].review
        required_zones = (
            review_config.alerts.required_zones
            + review_config.detections.required_zones
        )
        if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not enter required zones"
        if not (
            (
                obj.obj_data["label"] in review_config.alerts.labels
                and (
                    not review_config.alerts.required_zones
                    or set(obj.entered_zones) & set(review_config.alerts.required_zones)
                )
            )
            or (
                (
                    not review_config.detections.labels
                    or obj.obj_data["label"] in review_config.detections.labels
                )
                and (
                    not review_config.detections.required_zones
                    or set(obj.entered_zones) & set(review_config.alerts.required_zones)
                )
            )
            return False

        # If the required objects are not present
        if (
            record_config.events.objects is not None
            and obj.obj_data["label"] not in record_config.events.objects
        ):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not contain required objects"
                f"Not creating clip for {obj.obj_data['id']} because it did not qualify as an alert or detection"
            )
            return False

@@ -1206,7 +1267,17 @@ class TrackedObjectProcessor(threading.Thread):
                for obj in camera_state.tracked_objects.values()
                if zone in obj.current_zones and not obj.false_positive
            )
            active_obj_counter = Counter(
                obj.obj_data["label"]
                for obj in camera_state.tracked_objects.values()
                if (
                    zone in obj.current_zones
                    and not obj.false_positive
                    and obj.active
                )
            )
            total_label_count = 0
            total_active_label_count = 0

            # update counts and publish status
            for label in set(self.zone_data[zone].keys()) | set(obj_counter.keys()):
@@ -1216,41 +1287,67 @@

                # if we have previously published a count for this zone/label
                zone_label = self.zone_data[zone][label]
                active_zone_label = self.active_zone_data[zone][label]
                if camera in zone_label:
                    current_count = sum(zone_label.values())
                    current_active_count = sum(active_zone_label.values())
                    zone_label[camera] = (
                        obj_counter[label] if label in obj_counter else 0
                    )
                    active_zone_label[camera] = (
                        active_obj_counter[label]
                        if label in active_obj_counter
                        else 0
                    )
                    new_count = sum(zone_label.values())
                    new_active_count = sum(active_zone_label.values())
                    if new_count != current_count:
                        self.dispatcher.publish(
                            f"{zone}/{label}",
                            new_count,
                            retain=False,
                        )
                    if new_active_count != current_active_count:
                        self.dispatcher.publish(
                            f"{zone}/{label}/active",
                            new_active_count,
                            retain=False,
                        )

                    # Set the count for the /zone/all topic.
                    total_label_count += new_count
                    total_active_label_count += new_active_count

                # if this is a new zone/label combo for this camera
                else:
                    if label in obj_counter:
                        zone_label[camera] = obj_counter[label]
                        active_zone_label[camera] = active_obj_counter[label]
                        self.dispatcher.publish(
                            f"{zone}/{label}",
                            obj_counter[label],
                            retain=False,
                        )
                        self.dispatcher.publish(
                            f"{zone}/{label}/active",
                            active_obj_counter[label],
                            retain=False,
                        )

                        # Set the count for the /zone/all topic.
                        total_label_count += obj_counter[label]
                        total_active_label_count += active_obj_counter[label]

            # if we have previously published a count for this zone all labels
            zone_label = self.zone_data[zone]["all"]
            active_zone_label = self.active_zone_data[zone]["all"]
            if camera in zone_label:
                current_count = sum(zone_label.values())
                current_active_count = sum(active_zone_label.values())
                zone_label[camera] = total_label_count
                active_zone_label[camera] = total_active_label_count
                new_count = sum(zone_label.values())
                new_active_count = sum(active_zone_label.values())

                if new_count != current_count:
                    self.dispatcher.publish(
@@ -1258,14 +1355,26 @@ class TrackedObjectProcessor(threading.Thread):
                        new_count,
                        retain=False,
                    )
                if new_active_count != current_active_count:
                    self.dispatcher.publish(
                        f"{zone}/all/active",
                        new_active_count,
                        retain=False,
                    )
            # if this is a new zone all label for this camera
            else:
                zone_label[camera] = total_label_count
                active_zone_label[camera] = total_active_label_count
                self.dispatcher.publish(
                    f"{zone}/all",
                    total_label_count,
                    retain=False,
                )
                self.dispatcher.publish(
                    f"{zone}/all/active",
                    total_active_label_count,
                    retain=False,
                )

        # cleanup event finished queue
        while not self.stop_event.is_set():
@@ -15,7 +15,7 @@ import cv2
import numpy as np

from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.util.image import (
    SharedMemoryFrameManager,
@@ -112,7 +112,7 @@ class Canvas:
class FFMpegConverter(threading.Thread):
    def __init__(
        self,
        camera: str,
        ffmpeg: FfmpegConfig,
        input_queue: queue.Queue,
        stop_event: mp.Event,
        in_width: int,
@@ -123,8 +123,8 @@ class FFMpegConverter(threading.Thread):
        birdseye_rtsp: bool = False,
    ):
        threading.Thread.__init__(self)
        self.name = f"{camera}_output_converter"
        self.camera = camera
        self.name = "birdseye_output_converter"
        self.camera = "birdseye"
        self.input_queue = input_queue
        self.stop_event = stop_event
        self.bd_pipe = None
@@ -133,7 +133,7 @@ class FFMpegConverter(threading.Thread):
            self.recreate_birdseye_pipe()

        ffmpeg_cmd = [
            "ffmpeg",
            ffmpeg.ffmpeg_path,
            "-threads",
            "1",
            "-f",
@@ -357,16 +357,15 @@ class BirdsEyeFrameManager:
            frame = None
            channel_dims = None
        else:
            try:
                frame = self.frame_manager.get(
                    f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv
                )
            except FileNotFoundError:
                # TODO: better frame management would prevent this edge case
                logger.warning(
                    f"Unable to copy frame {camera}{frame_time} to birdseye."
                )
            frame_id = f"{camera}{frame_time}"
            frame = self.frame_manager.get(
                frame_id, self.config.cameras[camera].frame_shape_yuv
            )

            if frame is None:
                logger.debug(f"Unable to copy frame {camera}{frame_time} to birdseye.")
                return

            channel_dims = self.cameras[camera]["channel_dims"]

        copy_yuv_to_position(
@@ -377,6 +376,8 @@ class BirdsEyeFrameManager:
            channel_dims,
        )

        self.frame_manager.close(frame_id)

    def camera_active(self, mode, object_box_count, motion_box_count):
        if mode == BirdseyeModeEnum.continuous:
            return True
@@ -718,14 +719,13 @@ class Birdseye:
    def __init__(
        self,
        config: FrigateConfig,
        frame_manager: SharedMemoryFrameManager,
        stop_event: mp.Event,
        websocket_server,
    ) -> None:
        self.config = config
        self.input = queue.Queue(maxsize=10)
        self.converter = FFMpegConverter(
            "birdseye",
            config.ffmpeg,
            self.input,
            stop_event,
            config.birdseye.width,
@@ -738,6 +738,7 @@ class Birdseye:
        self.broadcaster = BroadcastThread(
            "birdseye", self.converter, websocket_server, stop_event
        )
        frame_manager = SharedMemoryFrameManager()
        self.birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
        self.config_subscriber = ConfigSubscriber("config/birdseye/")

@@ -6,7 +6,7 @@ import queue
import subprocess as sp
import threading

from frigate.config import CameraConfig
from frigate.config import CameraConfig, FfmpegConfig

logger = logging.getLogger(__name__)

@@ -15,6 +15,7 @@ class FFMpegConverter(threading.Thread):
    def __init__(
        self,
        camera: str,
        ffmpeg: FfmpegConfig,
        input_queue: queue.Queue,
        stop_event: mp.Event,
        in_width: int,
@@ -30,7 +31,7 @@ class FFMpegConverter(threading.Thread):
        self.stop_event = stop_event

        ffmpeg_cmd = [
            "ffmpeg",
            ffmpeg.ffmpeg_path,
            "-threads",
            "1",
            "-f",
@@ -142,6 +143,7 @@ class JsmpegCamera:
        )
        self.converter = FFMpegConverter(
            config.name,
            config.ffmpeg,
            self.input,
            stop_event,
            config.frame_shape[1],
@@ -45,7 +45,6 @@ def output_frames(
    signal.signal(signal.SIGINT, receiveSignal)

    frame_manager = SharedMemoryFrameManager()
    previous_frames = {}

    # start a websocket server on 8082
    WebSocketWSGIHandler.http_version = "1.1"
@@ -64,6 +63,7 @@ def output_frames(
    jsmpeg_cameras: dict[str, JsmpegCamera] = {}
    birdseye: Optional[Birdseye] = None
    preview_recorders: dict[str, PreviewRecorder] = {}
    preview_write_times: dict[str, float] = {}

    move_preview_frames("cache")

@@ -73,9 +73,10 @@ def output_frames(

        jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server)
        preview_recorders[camera] = PreviewRecorder(cam_config)
        preview_write_times[camera] = 0

    if config.birdseye.enabled:
        birdseye = Birdseye(config, frame_manager, stop_event, websocket_server)
        birdseye = Birdseye(config, stop_event, websocket_server)

    websocket_thread.start()

@@ -97,6 +98,10 @@ def output_frames(

        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)

        if frame is None:
            logger.debug(f"Failed to get frame {frame_id} from SHM")
            continue

        # send camera frame to ffmpeg process if websockets are connected
        if any(
            ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
@@ -121,15 +126,21 @@ def output_frames(
        )

        # send frames for low fps recording
        preview_recorders[camera].write_data(
        generated_preview = preview_recorders[camera].write_data(
            current_tracked_objects, motion_boxes, frame_time, frame
        )
        preview_write_times[camera] = frame_time

        # delete frames after they have been used for output
        if camera in previous_frames:
            frame_manager.delete(f"{camera}{previous_frames[camera]}")
        # if another camera generated a preview,
        # check for any cameras that are currently offline
        # and need to generate a preview
        if generated_preview:
            for camera, time in preview_write_times.copy().items():
                if time != 0 and frame_time - time > 10:
                    preview_recorders[camera].flag_offline(frame_time)
                    preview_write_times[camera] = frame_time

        previous_frames[camera] = frame_time
        frame_manager.close(frame_id)

    move_preview_frames("clips")

@@ -149,7 +160,7 @@ def output_frames(

        frame_id = f"{camera}{frame_time}"
        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
        frame_manager.delete(frame_id)
        frame_manager.close(frame_id)

    detection_subscriber.stop()

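The new generated_preview path above piggybacks on whichever camera rolls over first: when one recorder finishes an hourly preview, the loop scans preview_write_times for cameras that have not written a frame recently and flags them offline so their previews still close out. A minimal sketch of that staleness scan, with hypothetical timestamps:

preview_write_times = {"front": 100.0, "back": 50.0}  # hypothetical last-write times
frame_time = 65.0  # current frame time of the camera that just rolled over

for camera, last_write in preview_write_times.copy().items():
    # a camera is treated as offline after 10s without a written frame
    if last_write != 0 and frame_time - last_write > 10:
        print(f"flagging {camera} offline")  # stand-in for flag_offline(frame_time)
        preview_write_times[camera] = frame_time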
@@ -3,6 +3,7 @@
 import datetime
 import logging
 import os
+import shutil
 import subprocess as sp
 import threading
 import time

@@ -11,6 +12,7 @@ from pathlib import Path
 import cv2
 import numpy as np

+from frigate.comms.config_updater import ConfigSubscriber
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import CameraConfig, RecordQualityEnum
 from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW, PREVIEW_FRAME_TYPE

@@ -76,6 +78,7 @@ class FFMpegConverter(threading.Thread):

         # write a PREVIEW at fps and 1 key frame per clip
         self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
+            config.ffmpeg.ffmpeg_path,
             config.ffmpeg.hwaccel_args,
             input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin",
             output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",

@@ -169,6 +172,7 @@ class PreviewRecorder:

         # create communication for finished previews
         self.requestor = InterProcessRequestor()
+        self.config_subscriber = ConfigSubscriber(f"config/record/{self.config.name}")

         y, u1, u2, v1, v2 = get_yuv_crop(
             self.config.frame_shape_yuv,

@@ -242,6 +246,9 @@ class PreviewRecorder:
         frame_time: float,
     ) -> bool:
         """Decide if this frame should be added to PREVIEW."""
+        if not self.config.record.enabled:
+            return False
+
         active_objs = get_active_objects(
             frame_time, self.config, current_tracked_objects
         )
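The new `ConfigSubscriber` gives each `PreviewRecorder` a per-camera topic (`config/record/<name>`) so record settings can change at runtime without a restart; `write_data` polls it once per frame, as the next hunk shows. A generic sketch of that poll-and-swap pattern, with a queue-backed stand-in for Frigate's IPC-based subscriber:

```python
# Generic poll-and-swap config reload, mirroring the diff's use of
# ConfigSubscriber.check_for_update(). The queue-backed subscriber is a
# stand-in for Frigate's IPC implementation, not its actual API surface.
import queue
from typing import Any, Optional, Tuple

class StubConfigSubscriber:
    def __init__(self) -> None:
        self._updates: "queue.Queue[Any]" = queue.Queue()

    def publish(self, new_config: Any) -> None:
        self._updates.put(new_config)

    def check_for_update(self) -> Tuple[bool, Optional[Any]]:
        # non-blocking: (topic_matched, new_config) or (False, None)
        try:
            return True, self._updates.get_nowait()
        except queue.Empty:
            return False, None

# per-frame hot path, shaped like write_data in the hunk below:
#     _, updated_record_config = self.config_subscriber.check_for_update()
#     if updated_record_config:
#         self.config.record = updated_record_config
```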
@@ -298,19 +305,71 @@ class PreviewRecorder:
         motion_boxes: list[list[int]],
         frame_time: float,
         frame,
-    ) -> None:
+    ) -> bool:
+        # check for updated record config
+        _, updated_record_config = self.config_subscriber.check_for_update()
+
+        if updated_record_config:
+            self.config.record = updated_record_config
+
         # always write the first frame
         if self.start_time == 0:
             self.start_time = frame_time
             self.output_frames.append(frame_time)
             self.write_frame_to_cache(frame_time, frame)
-            return
+            return False

         # check if PREVIEW clip should be generated and cached frames reset
         if frame_time >= self.segment_end:
-            # save last frame to ensure consistent duration
+            if len(self.output_frames) > 0:
+                # save last frame to ensure consistent duration
+                if self.config.record:
                     self.output_frames.append(frame_time)
                     self.write_frame_to_cache(frame_time, frame)

+                # write the preview if any frames exist for this hour
                 FFMpegConverter(
                     self.config,
                     self.output_frames,
                     self.requestor,
                 ).start()

             # reset frame cache
             self.segment_end = (
                 (datetime.datetime.now() + datetime.timedelta(hours=1))
                 .astimezone(datetime.timezone.utc)
                 .replace(minute=0, second=0, microsecond=0)
                 .timestamp()
             )
             self.start_time = frame_time
+            self.last_output_time = frame_time
+            self.output_frames: list[float] = []
+
+            # include first frame to ensure consistent duration
+            if self.config.record.enabled:
+                self.output_frames.append(frame_time)
+                self.write_frame_to_cache(frame_time, frame)
+
+            return True
+        elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time):
+            self.output_frames.append(frame_time)
+            self.write_frame_to_cache(frame_time, frame)
+
+        return False
+
+    def flag_offline(self, frame_time: float) -> None:
+        # check if PREVIEW clip should be generated and cached frames reset
+        if frame_time >= self.segment_end:
+            if len(self.output_frames) == 0:
+                return
+
+            old_frame_path = get_cache_image_name(
+                self.config.name, self.output_frames[-1]
+            )
+            new_frame_path = get_cache_image_name(self.config.name, frame_time)
+            shutil.copy(old_frame_path, new_frame_path)
+
+            # save last frame to ensure consistent duration
+            self.output_frames.append(frame_time)
+            FFMpegConverter(
+                self.config,
+                self.output_frames,

@@ -328,13 +387,6 @@ class PreviewRecorder:
             self.last_output_time = frame_time
             self.output_frames = []

-            # include first frame to ensure consistent duration
-            self.output_frames.append(frame_time)
-            self.write_frame_to_cache(frame_time, frame)
-        elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time):
-            self.output_frames.append(frame_time)
-            self.write_frame_to_cache(frame_time, frame)

     def stop(self) -> None:
         self.requestor.stop()
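The `segment_end` reset above targets the next top of the hour as a UTC epoch timestamp, so preview clips stay aligned to hour boundaries. The same expression as a standalone helper (note that zeroing minutes after converting to UTC lands on the UTC hour, which matches the local hour only for whole-hour offsets):

```python
# Standalone sketch of the segment_end computation above: the next
# top-of-the-hour, as a UTC epoch timestamp. Illustrative only.
import datetime
from typing import Optional

def next_hour_timestamp(now: Optional[datetime.datetime] = None) -> float:
    now = now or datetime.datetime.now()
    return (
        (now + datetime.timedelta(hours=1))
        .astimezone(datetime.timezone.utc)
        .replace(minute=0, second=0, microsecond=0)
        .timestamp()
    )

# e.g. at 14:37 local time (whole-hour offset) this returns the epoch
# seconds for 15:00, the boundary at which the next preview clip starts
```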
@@ -99,6 +99,10 @@ class PtzMotionEstimator:
                 frame_id, self.camera_config.frame_shape_yuv
             )

+            if yuv_frame is None:
+                self.coord_transformations = None
+                return None
+
             frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2GRAY_I420)

             # mask out detections for better motion estimation

@@ -264,9 +268,9 @@ class PtzAutoTracker:
             self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
             return

-        movestatus_supported = self.onvif.get_service_capabilities(camera)
+        move_status_supported = self.onvif.get_service_capabilities(camera)

-        if movestatus_supported is None or movestatus_supported.lower() != "true":
+        if move_status_supported is None or move_status_supported.lower() != "true":
             logger.warning(
                 f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
             )

@@ -803,8 +807,8 @@ class PtzAutoTracker:
         invalid_delta = np.any(delta > delta_thresh)

         # Check variance
-        stdevs = np.std(velocities, axis=0)
-        high_variances = np.any(stdevs > var_thresh)
+        stdev_list = np.std(velocities, axis=0)
+        high_variances = np.any(stdev_list > var_thresh)

         # Check direction difference
         velocities = np.round(velocities)
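The renamed `stdev_list` belongs to a sanity filter on recent velocity samples: the estimate is rejected when consecutive samples jump too far or the per-axis spread is too wide. A sketch under stated assumptions; the threshold values are illustrative, and `delta` is computed upstream of this hunk, so `np.diff` here is a stand-in:

```python
# Sketch of the per-axis velocity sanity check seen in the autotracker
# hunk. Thresholds are illustrative; np.diff stands in for the delta
# computed above the hunk in the real code.
import numpy as np

def velocity_looks_invalid(
    velocities: np.ndarray,  # shape (N, 2): recent (x, y) velocity samples
    delta_thresh: float = 30.0,
    var_thresh: float = 5.0,
) -> bool:
    # jump between consecutive samples (stand-in for the real delta)
    delta = np.abs(np.diff(velocities, axis=0))
    invalid_delta = np.any(delta > delta_thresh)

    # per-axis standard deviation across the samples, as in stdev_list
    stdev_list = np.std(velocities, axis=0)
    high_variances = np.any(stdev_list > var_thresh)

    return bool(invalid_delta or high_variances)

# velocity_looks_invalid(np.array([[2.0, 1.0], [2.5, 1.2], [40.0, 1.1]]))
# -> True (the 40.0 sample jumps past delta_thresh)
```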
@@ -8,6 +8,7 @@ from pathlib import Path
 import numpy
 from onvif import ONVIFCamera, ONVIFError
 from zeep.exceptions import Fault, TransportError
+from zeep.transports import Transport

 from frigate.config import FrigateConfig, ZoomingModeEnum
 from frigate.types import PTZMetricsTypes

@@ -45,6 +46,7 @@ class OnvifController:

         if cam.onvif.host:
             try:
+                transport = Transport(timeout=10, operation_timeout=10)
                 self.cams[cam_name] = {
                     "onvif": ONVIFCamera(
                         cam.onvif.host,

@@ -55,6 +57,7 @@ class OnvifController:
                             Path(find_spec("onvif").origin).parent / "wsdl"
                         ).replace("dist-packages/onvif", "site-packages"),
                         adjust_time=cam.onvif.ignore_time_mismatch,
+                        transport=transport,
                     ),
                     "init": False,
                     "active": False,

@@ -332,6 +335,10 @@ class OnvifController:
             )
             self._stop(camera_name)

+        if "pt" not in self.cams[camera_name]["features"]:
+            logger.error(f"{camera_name} does not support ONVIF pan/tilt movement.")
+            return
+
         self.cams[camera_name]["active"] = True
         onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
         move_request = self.cams[camera_name]["move_request"]

@@ -473,6 +480,10 @@ class OnvifController:
             )
             self._stop(camera_name)

+        if "zoom" not in self.cams[camera_name]["features"]:
+            logger.error(f"{camera_name} does not support ONVIF zooming.")
+            return
+
         self.cams[camera_name]["active"] = True
         onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
         move_request = self.cams[camera_name]["move_request"]
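The `Transport(timeout=10, operation_timeout=10)` wiring caps both connection time and per-operation time, so one unreachable camera can no longer hang controller startup. A minimal sketch of the same wiring; the host, port, and credentials are placeholders, and the `transport` keyword is taken from the diff:

```python
# Sketch of the timeout wiring added above: a zeep Transport bounds both
# the connection and each SOAP operation. Host/credentials are placeholders.
from onvif import ONVIFCamera
from zeep.transports import Transport

transport = Transport(timeout=10, operation_timeout=10)

camera = ONVIFCamera(
    "192.168.1.10",  # placeholder host
    80,              # placeholder ONVIF port
    "user",          # placeholder credentials
    "password",
    transport=transport,
)
```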
@@ -12,7 +12,7 @@ from playhouse.sqlite_ext import SqliteExtDatabase

 from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
 from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
-from frigate.models import Event, Previews, Recordings, ReviewSegment
+from frigate.models import Previews, Recordings, ReviewSegment
 from frigate.record.util import remove_empty_directories, sync_recordings
 from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time

@@ -61,8 +61,39 @@ class RecordingCleanup(threading.Thread):
         db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);")
         db.close()

+    def expire_review_segments(self, config: CameraConfig, now: datetime) -> None:
+        """Delete review segments that are expired"""
+        alert_expire_date = (
+            now - datetime.timedelta(days=config.record.alerts.retain.days)
+        ).timestamp()
+        detection_expire_date = (
+            now - datetime.timedelta(days=config.record.detections.retain.days)
+        ).timestamp()
+        expired_reviews: ReviewSegment = (
+            ReviewSegment.select(ReviewSegment.id)
+            .where(ReviewSegment.camera == config.name)
+            .where(
+                (
+                    (ReviewSegment.severity == "alert")
+                    & (ReviewSegment.end_time < alert_expire_date)
+                )
+                | (
+                    (ReviewSegment.severity == "detection")
+                    & (ReviewSegment.end_time < detection_expire_date)
+                )
+            )
+            .namedtuples()
+        )
+
+        max_deletes = 100000
+        deleted_reviews_list = list(map(lambda x: x[0], expired_reviews))
+        for i in range(0, len(deleted_reviews_list), max_deletes):
+            ReviewSegment.delete().where(
+                ReviewSegment.id << deleted_reviews_list[i : i + max_deletes]
+            ).execute()
+
     def expire_existing_camera_recordings(
-        self, expire_date: float, config: CameraConfig, events: Event
+        self, expire_date: float, config: CameraConfig, reviews: ReviewSegment
     ) -> None:
         """Delete recordings for existing camera based on retention config."""
         # Get the timestamp for cutoff of retained days

@@ -86,47 +117,47 @@ class RecordingCleanup(threading.Thread):
             .iterator()
         )

-        # loop over recordings and see if they overlap with any non-expired events
+        # loop over recordings and see if they overlap with any non-expired reviews
         # TODO: expire segments based on segment stats according to config
-        event_start = 0
+        review_start = 0
         deleted_recordings = set()
         kept_recordings: list[tuple[float, float]] = []
         for recording in recordings:
             keep = False
+            mode = None
             # Now look for a reason to keep this recording segment
-            for idx in range(event_start, len(events)):
-                event: Event = events[idx]
+            for idx in range(review_start, len(reviews)):
+                review: ReviewSegment = reviews[idx]

-                # if the event starts in the future, stop checking events
+                # if the review starts in the future, stop checking reviews
                 # and let this recording segment expire
-                if event.start_time > recording.end_time:
+                if review.start_time > recording.end_time:
                     keep = False
                     break

-                # if the event is in progress or ends after the recording starts, keep it
-                # and stop looking at events
-                if event.end_time is None or event.end_time >= recording.start_time:
+                # if the review is in progress or ends after the recording starts, keep it
+                # and stop looking at reviews
+                if review.end_time is None or review.end_time >= recording.start_time:
                     keep = True
+                    mode = (
+                        config.record.alerts.retain.mode
+                        if review.severity == "alert"
+                        else config.record.detections.retain.mode
+                    )
                     break

-                # if the event ends before this recording segment starts, skip
-                # this event and check the next event for an overlap.
-                # since the events and recordings are sorted, we can skip events
+                # if the review ends before this recording segment starts, skip
+                # this review and check the next review for an overlap.
+                # since the reviews and recordings are sorted, we can skip reviews
                 # that end before the previous recording segment started on future segments
-                if event.end_time < recording.start_time:
-                    event_start = idx
+                if review.end_time < recording.start_time:
+                    review_start = idx

             # Delete recordings outside of the retention window or based on the retention mode
             if (
                 not keep
-                or (
-                    config.record.events.retain.mode == RetainModeEnum.motion
-                    and recording.motion == 0
-                )
-                or (
-                    config.record.events.retain.mode == RetainModeEnum.active_objects
-                    and recording.objects == 0
-                )
+                or (mode == RetainModeEnum.motion and recording.motion == 0)
+                or (mode == RetainModeEnum.active_objects and recording.objects == 0)
             ):
                 Path(recording.path).unlink(missing_ok=True)
                 deleted_recordings.add(recording.id)

@@ -202,65 +233,6 @@ class RecordingCleanup(threading.Thread):
                 Previews.id << deleted_previews_list[i : i + max_deletes]
             ).execute()

-        review_segments: list[ReviewSegment] = (
-            ReviewSegment.select(
-                ReviewSegment.id,
-                ReviewSegment.start_time,
-                ReviewSegment.end_time,
-                ReviewSegment.thumb_path,
-            )
-            .where(
-                ReviewSegment.camera == config.name,
-                ReviewSegment.end_time < expire_date,
-            )
-            .order_by(ReviewSegment.start_time)
-            .namedtuples()
-            .iterator()
-        )
-
-        # expire review segments
-        recording_start = 0
-        deleted_segments = set()
-        for segment in review_segments:
-            keep = False
-            # look for a reason to keep this segment
-            for idx in range(recording_start, len(kept_recordings)):
-                start_time, end_time = kept_recordings[idx]
-
-                # if the recording starts in the future, stop checking recordings
-                # and let this segment expire
-                if start_time > segment.end_time:
-                    keep = False
-                    break
-
-                # if the recording ends after the segment starts, keep it
-                # and stop looking at recordings
-                if end_time >= segment.start_time:
-                    keep = True
-                    break
-
-                # if the recording ends before this segment starts, skip
-                # this recording and check the next recording for an overlap.
-                # since the kept recordings and segments are sorted, we can skip recordings
-                # that end before the current segment started
-                if end_time < segment.start_time:
-                    recording_start = idx
-
-            # Delete segments without any relevant recordings
-            if not keep:
-                Path(segment.thumb_path).unlink(missing_ok=True)
-                deleted_segments.add(segment.id)
-
-        # expire segments
-        logger.debug(f"Expiring {len(deleted_segments)} segments")
-        # delete up to 100,000 at a time
-        max_deletes = 100000
-        deleted_segments_list = list(deleted_segments)
-        for i in range(0, len(deleted_segments_list), max_deletes):
-            ReviewSegment.delete().where(
-                ReviewSegment.id << deleted_segments_list[i : i + max_deletes]
-            ).execute()
-
     def expire_recordings(self) -> None:
         """Delete recordings based on retention config."""
         logger.debug("Start expire recordings.")
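`expire_existing_camera_recordings` walks two lists sorted by start time with a moving lower bound (`review_start`), so each review is visited only a bounded number of times across all recordings. The same sweep as a standalone sketch with simplified `(start, end)` tuples:

```python
# Standalone sketch of the sorted-overlap sweep above: advance a lower
# bound past reviews that ended before the current recording, so later
# recordings never rescan them. Tuple shapes are simplified.
def keep_flags(recordings, reviews):
    """recordings/reviews: lists of (start, end), both sorted by start."""
    review_start = 0
    flags = []
    for rec_start, rec_end in recordings:
        keep = False
        for idx in range(review_start, len(reviews)):
            rev_start, rev_end = reviews[idx]
            # review starts after this recording ends: later ones will too
            if rev_start > rec_end:
                break
            # review is in progress (no end) or overlaps this recording
            if rev_end is None or rev_end >= rec_start:
                keep = True
                break
            # review ended before this recording: skip it from now on
            review_start = idx
        flags.append(keep)
    return flags

# keep_flags([(0, 10), (20, 30)], [(5, 8), (25, None)]) -> [True, True]
```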
@@ -302,30 +274,31 @@ class RecordingCleanup(threading.Thread):
         logger.debug("Start all cameras.")
         for camera, config in self.config.cameras.items():
             logger.debug(f"Start camera: {camera}.")
+            now = datetime.datetime.now()
+
+            self.expire_review_segments(config, now)
+
             expire_days = config.record.retain.days
-            expire_date = (
-                datetime.datetime.now() - datetime.timedelta(days=expire_days)
-            ).timestamp()
+            expire_date = (now - datetime.timedelta(days=expire_days)).timestamp()

-            # Get all the events to check against
-            events: Event = (
-                Event.select(
-                    Event.start_time,
-                    Event.end_time,
+            # Get all the reviews to check against
+            reviews: ReviewSegment = (
+                ReviewSegment.select(
+                    ReviewSegment.start_time,
+                    ReviewSegment.end_time,
+                    ReviewSegment.severity,
                 )
                 .where(
-                    Event.camera == camera,
-                    # need to ensure segments for all events starting
+                    ReviewSegment.camera == camera,
+                    # need to ensure segments for all reviews starting
                     # before the expire date are included
-                    Event.start_time < expire_date,
-                    Event.has_clip,
+                    ReviewSegment.start_time < expire_date,
                 )
-                .order_by(Event.start_time)
+                .order_by(ReviewSegment.start_time)
                 .namedtuples()
             )

-            self.expire_existing_camera_recordings(expire_date, config, events)
+            self.expire_existing_camera_recordings(expire_date, config, reviews)
             logger.debug(f"End camera: {camera}.")

         logger.debug("End all cameras.")
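Both `expire_review_segments` and the previews cleanup above delete ids in slices of 100,000 (`<<` is peewee's `IN` operator) so that no single `DELETE ... WHERE id IN (...)` statement grows unbounded. The batching arithmetic as a standalone sketch, with `delete_where` standing in for peewee's `delete().where(...).execute()`:

```python
# Standalone sketch of the chunked-delete batching above. delete_where is
# a stand-in for a peewee Model.delete().where(...).execute() call that
# returns the number of rows removed.
from typing import Callable, Iterable, List

MAX_DELETES = 100_000

def delete_in_batches(
    ids: Iterable[str], delete_where: Callable[[List[str]], int]
) -> int:
    id_list = list(ids)
    deleted = 0
    for i in range(0, len(id_list), MAX_DELETES):
        # each slice becomes one DELETE ... WHERE id IN (...) statement
        deleted += delete_where(id_list[i : i + MAX_DELETES])
    return deleted

# usage with peewee, shaped like the diff:
#     delete_in_batches(
#         deleted_reviews_list,
#         lambda chunk: ReviewSegment.delete()
#         .where(ReviewSegment.id << chunk)
#         .execute(),
#     )
```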
@@ -14,7 +14,7 @@ from typing import Optional

 from peewee import DoesNotExist

-from frigate.config import FrigateConfig
+from frigate.config import FfmpegConfig, FrigateConfig
 from frigate.const import (
     CACHE_DIR,
     CLIPS_DIR,

@@ -116,7 +116,7 @@ class RecordingExporter(threading.Thread):
         minutes = int(diff / 60)
         seconds = int(diff % 60)
         ffmpeg_cmd = [
-            "ffmpeg",
+            self.config.ffmpeg.ffmpeg_path,
             "-hide_banner",
             "-loglevel",
             "warning",

@@ -230,11 +230,12 @@ class RecordingExporter(threading.Thread):

         if self.playback_factor == PlaybackFactorEnum.realtime:
             ffmpeg_cmd = (
-                f"ffmpeg -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
+                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
             ).split(" ")
         elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
             ffmpeg_cmd = (
                 parse_preset_hardware_acceleration_encode(
+                    self.config.ffmpeg.ffmpeg_path,
                     self.config.ffmpeg.hwaccel_args,
                     f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}",
                     f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",

@@ -267,7 +268,7 @@ class RecordingExporter(threading.Thread):
         logger.debug(f"Finished exporting {video_path}")


-def migrate_exports(camera_names: list[str]):
+def migrate_exports(ffmpeg: FfmpegConfig, camera_names: list[str]):
     Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)

     exports = []

@@ -286,7 +287,7 @@ def migrate_exports(camera_names: list[str]):
         )  # use jpg because webp encoder can't get quality low enough

         ffmpeg_cmd = [
-            "ffmpeg",
+            ffmpeg.ffmpeg_path,
             "-hide_banner",
             "-loglevel",
             "warning",
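Every hunk in this file swaps the hard-coded `"ffmpeg"` literal for the configured `ffmpeg_path`, so a bundled or user-specified binary is honored in exports and migrations alike. A sketch of the pattern; `FfmpegBin` is a hypothetical stand-in for the one `FfmpegConfig` field used here:

```python
# Sketch of the configurable-binary pattern above. FfmpegBin is a
# hypothetical stand-in for the ffmpeg_path field of FfmpegConfig.
from dataclasses import dataclass

@dataclass
class FfmpegBin:
    ffmpeg_path: str = "ffmpeg"  # default matches the old hard-coded literal

def build_export_cmd(ffmpeg: FfmpegBin, src: str, dst: str) -> list[str]:
    return [
        ffmpeg.ffmpeg_path,  # was the literal "ffmpeg"
        "-hide_banner",
        "-loglevel", "warning",
        "-i", src,
        "-c", "copy",
        "-movflags", "+faststart",
        dst,
    ]

# e.g. subprocess.run(build_export_cmd(FfmpegBin(), "in.mp4", "out.mp4"),
#                     check=True)
```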