forked from GitHub/frigate

**Compare commits: dependabot...v0.15.0-be (178 commits)**

Commit SHA1s (author and date columns empty):

e76f4e9bd9, 0df091f387, 66277fbb6c, a67ff3843a, 9ae839ad72, 66f71aecf7, 0b203a3673, 26c3f9f914,
474c248c9d, 5b1b6b5be0, 45e9030358, f9c1600f0d, ad85f8882b, 206ed06905, e407ba47c2, 7fdf42a56f,
4eea541352, ed9c67804a, 9c20cd5f7b, 6c86827d3a, d2b2f3d54d, 64b3397f8e, 0829517b72, c1bfc1df67,
96c0c43dc8, a68c7f4ef8, 7c474e6827, 143bab87f1, 580f35112e, 3249ffb273, 7bae9463b2, ae30ac6e3c,
46ed520886, ace02a6dfa, 0d59754be2, 15bd26c9b1, bc371acb3e, 2eb5fbf112, fc0fb158d5, 404807c697,
29ea7c53f2, 1fc4af9c86, ac762762c3, 553676aade, a13b9815f6, 156e7cc628, 959ca0f412, 9755fa0537,
77ec86d31a, 189d4b459f, 44f40966e7, 7d3313e732, 591b50dfa7, 27ef661fec, d7935abc14, 11068aa9d0,
1234003527, e5ebf938f6, 8c2c07fd18, 9e1a50c3be, ac8ddada0b, 885485da70, bb4e863e87, c7a4220d65,
03dd9b2d42, 89ca085b94, fffd9defea, d10fea6012, ab26aee8b2, bb80a7b2ee, e4a6b29279, d12c7809dd,
357ce0382e, 73da3d9b20, e67b7a6d5e, 4e25bebdd0, abd22d2566, 8aeb597780, 33825f6d96, eca504cb07,
4c75440af4, 94f7528885, 4dadf6d353, 2d27e72ed9, 4ff0c8a8d1, f9fba94863, f9b246dbd0, 8fefded8dc,
18824830fd, fa81d87dc0, 8bc145472a, 7afc1e9762, fc59c83e16, e4048be088, d715a8c290, ad308252a1,
c7d9f83638, 828fdbfd2d, 40c6fda19d, b69816c2f9, 46f5234bd9, 81b8d7a66b, b1285a16c1, 90140e7710,
8364e68667, 4bb420d049, 560dc68120, 8fcb8e54f7, 6c70e56059, b24d292ade, 2137de37b9, 3c591ad8a9,
b56f4c4558, 5d8bcb42c6, b299652e86, 8ac4b001a2, 6294ce7807, 8173cd7776, edaccd86d6, 5f77408956,
e836523bc3, 9f866be110, f6879f40b0, 06f47f262f, eda52a3b82, 3f1ab66899, af844ea9d5, b75efcbca2,
25043278ab, 644069fb23, 0eccb6a610, 0abd514064, 3879fde06d, 887433fc6a, dd7a07bd0d, 0ee32cf110,
72aa68cedc, 9adffa1ef5, 4ca267ea17, 833768172d, 1ec459ea3a, 66d0ad5803, 92ac025e43, e8b2fde753,
0fc7999780, 3a403392e7, acccc6fd93, 40bb4765d4, 48c60621b6, 51509760e3, 1e1610671e, de86c37687,
6e332bbdf8, 8a8a0c7dec, d4b9b5a7dd, 6df541e1fd, 748087483c, ae91fa6a39, 2897afce41, ee8091ba91,
30b5faebae, 8d753f821d, 54eb03d2a1, dd6276e706, f67ec241d4, 8ade85edec, a2ca18a714, 6a83ff2511,
bc3a06178b, 9fda259c0c, d4925622f9, dbeaf43b8f, f86957e5e1, a2f42d51fd, 0b71cfaf06, d558ac83b6,
2a15b95f18, 039ab1ccd7
```diff
@@ -12,6 +12,7 @@ argmax
 argmin
 argpartition
 ascontiguousarray
+astype
 authelia
 authentik
 autodetected
@@ -42,6 +43,7 @@ codeproject
 colormap
 colorspace
 comms
+coro
 ctypeslib
 CUDA
 Cuvid
@@ -59,6 +61,7 @@ dsize
 dtype
 ECONNRESET
 edgetpu
+fastapi
 faststart
 fflags
 ffprobe
@@ -193,6 +196,7 @@ poweroff
 preexec
 probesize
 protobuf
+pstate
 psutil
 pubkey
 putenv
@@ -212,6 +216,7 @@ rcond
 RDONLY
 rebranded
 referer
+reindex
 Reolink
 restream
 restreamed
@@ -236,6 +241,7 @@ sleeptime
 SNDMORE
 socs
 sqliteq
+sqlitevecq
 ssdlite
 statm
 stimeout
@@ -270,9 +276,11 @@ unraid
 unreviewed
 userdata
 usermod
+uvicorn
 vaapi
 vainfo
 variations
+vbios
 vconcat
 vitb
 vstream
```
```diff
@@ -3,10 +3,12 @@
 set -euxo pipefail
 
 # Cleanup the old github host key
-sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
+if [[ -f ~/.ssh/known_hosts ]]; then
 # Add new github host key
+sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
 curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
 sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
+fi
 
 # Frigate normal container runs as root, so it have permission to create
 # the folders. But the devcontainer runs as the host user, so we need to
```
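The guard added above matters because `sed -i` fails outright when the target file does not exist, which aborts the whole script under `set -e`. A quick sketch of the failure mode (path and pattern are illustrative only):

```bash
# Sketch: sed -i cannot open a nonexistent file and exits non-zero,
# which kills any script running under `set -e`.
rm -f /tmp/known_hosts_demo
sed -i -e '/dummy/d' /tmp/known_hosts_demo || echo "sed failed with status $?"
```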
**.github/DISCUSSION_TEMPLATE/detector-support.yml** (13 lines changed, vendored)

```diff
@@ -74,19 +74,6 @@ body:
         - CPU (no coral)
     validations:
       required: true
-  - type: dropdown
-    id: object-detector
-    attributes:
-      label: Object Detector
-      options:
-        - Coral
-        - OpenVino
-        - TensorRT
-        - RKNN
-        - Other
-        - CPU (no coral)
-    validations:
-      required: true
   - type: textarea
     id: screenshots
     attributes:
```
**.github/DISCUSSION_TEMPLATE/general-support.yml** (13 lines changed, vendored)

```diff
@@ -102,19 +102,6 @@ body:
         - CPU (no coral)
     validations:
       required: true
-  - type: dropdown
-    id: object-detector
-    attributes:
-      label: Object Detector
-      options:
-        - Coral
-        - OpenVino
-        - TensorRT
-        - RKNN
-        - Other
-        - CPU (no coral)
-    validations:
-      required: true
   - type: dropdown
     id: network
     attributes:
```
**.github/pull_request_template.md** (1 line changed, vendored)

```diff
@@ -13,6 +13,7 @@
 - [ ] New feature
 - [ ] Breaking change (fix/feature causing existing functionality to break)
 - [ ] Code quality improvements to existing code
+- [ ] Documentation Update
 
 ## Additional information
 
```
**.github/workflows/ci.yml** (24 lines changed, vendored)

```diff
@@ -6,6 +6,8 @@ on:
     branches:
       - dev
       - master
+    paths-ignore:
+      - 'docs/**'
 
 # only run the latest commit to avoid cache overwrites
 concurrency:
@@ -155,6 +157,28 @@ jobs:
             tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
+  arm64_extra_builds:
+    runs-on: ubuntu-latest
+    name: ARM Extra Build
+    needs:
+      - arm64_build
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push Rockchip build
+        uses: docker/bake-action@v3
+        with:
+          push: true
+          targets: rk
+          files: docker/rockchip/rk.hcl
+          set: |
+            rk.tags=${{ steps.setup.outputs.image-name }}-rk
+            *.cache-from=type=gha
   combined_extra_builds:
     runs-on: ubuntu-latest
     name: Combined Extra Builds
```
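A rough local equivalent of the new `arm64_extra_builds` job, assuming a working buildx builder with QEMU and an illustrative tag name, would be:

```bash
# Sketch: build the Rockchip (rk) bake target locally instead of in CI.
docker buildx bake \
  --file docker/rockchip/rk.hcl \
  --set rk.tags=frigate:dev-rk \
  rk
```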
**.github/workflows/pull_request.yml** (5 lines changed, vendored)

```diff
@@ -1,6 +1,9 @@
 name: On pull request
 
-on: pull_request
+on:
+  pull_request:
+    paths-ignore:
+      - 'docs/**'
 
 env:
   DEFAULT_PYTHON: 3.9
```
**.github/workflows/release.yml** (4 lines changed, vendored)

```diff
@@ -34,14 +34,14 @@ jobs:
 STABLE_TAG=${BASE}:stable
 PULL_TAG=${BASE}:${BUILD_TAG}
 docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
-for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
+for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
 docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
 done
 
 # stable tag
 if [[ "${BUILD_TYPE}" == "stable" ]]; then
 docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
-for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
+for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
 docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
 done
 fi
```
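With `h8l` and `rocm` added to the variant loop, releases should publish two extra image flavors. Assuming the upstream `ghcr.io/blakeblackshear/frigate` naming convention carries over to this fork, they would be pulled as:

```bash
# Sketch: registry path and tags are assumptions based on upstream naming.
docker pull ghcr.io/blakeblackshear/frigate:stable-h8l
docker pull ghcr.io/blakeblackshear/frigate:stable-rocm
```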
```diff
@@ -23,7 +23,7 @@ services:
 #           count: 1
 #           capabilities: [gpu]
 environment:
-  YOLO_MODELS: yolov7-320
+  YOLO_MODELS: ""
 devices:
   - /dev/bus/usb:/dev/bus/usb
   # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
```
```diff
@@ -16,89 +16,25 @@ RUN mkdir /h8l-wheels
 # Build the wheels
 RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt
 
-# Build HailoRT and create wheel
-FROM wheels AS build-hailort
+FROM wget AS hailort
 ARG TARGETARCH
-SHELL ["/bin/bash", "-c"]
+RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
+    /deps/install_hailort.sh
 
-# Install necessary APT packages
-RUN apt-get -qq update \
-    && apt-get -qq install -y \
-    apt-transport-https \
-    gnupg \
-    wget \
-    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
-    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
-    gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
-    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
-    tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
-    && apt-get -qq update \
-    && apt-get -qq install -y \
-    python3.9 \
-    python3.9-dev \
-    build-essential cmake git \
-    && rm -rf /var/lib/apt/lists/*
-
-# Extract Python version and set environment variables
-RUN PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2) && \
-    PYTHON_VERSION_NO_DOT=$(echo $PYTHON_VERSION | sed 's/\.//') && \
-    echo "PYTHON_VERSION=$PYTHON_VERSION" > /etc/environment && \
-    echo "PYTHON_VERSION_NO_DOT=$PYTHON_VERSION_NO_DOT" >> /etc/environment
-
-# Clone and build HailoRT
-RUN . /etc/environment && \
-    git clone https://github.com/hailo-ai/hailort.git /opt/hailort && \
-    cd /opt/hailort && \
-    git checkout v4.18.0 && \
-    cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_PYBIND=1 -DPYBIND11_PYTHON_VERSION=${PYTHON_VERSION} && \
-    cmake --build build --config release --target libhailort && \
-    cmake --build build --config release --target _pyhailort && \
-    cp build/hailort/libhailort/bindings/python/src/_pyhailort.cpython-${PYTHON_VERSION_NO_DOT}-$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ && \
-    cp build/hailort/libhailort/src/libhailort.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
-
-RUN ls -ahl /opt/hailort/build/hailort/libhailort/src/
-RUN ls -ahl /opt/hailort/hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
-
-# Remove the existing setup.py if it exists in the target directory
-RUN rm -f /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
-
-# Copy generate_wheel_conf.py and setup.py
-COPY docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
-COPY docker/hailo8l/pyhailort_build_scripts/setup.py /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
-
-# Run the generate_wheel_conf.py script
-RUN python3 /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
-
-# Create a wheel file using pip3 wheel
-RUN cd /opt/hailort/hailort/libhailort/bindings/python/platform && \
-    python3 setup.py bdist_wheel --dist-dir /hailo-wheels
-
 # Use deps as the base image
 FROM deps AS h8l-frigate
 
 # Copy the wheels from the wheels stage
 COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
-COPY --from=build-hailort /hailo-wheels /deps/hailo-wheels
-COPY --from=build-hailort /etc/environment /etc/environment
-RUN CC=$(python3 -c "import sysconfig; import shlex; cc = sysconfig.get_config_var('CC'); cc_cmd = shlex.split(cc)[0]; print(cc_cmd[:-4] if cc_cmd.endswith('-gcc') else cc_cmd)") && \
-    echo "CC=$CC" >> /etc/environment
+COPY --from=hailort /hailo-wheels /deps/hailo-wheels
+COPY --from=hailort /rootfs/ /
 
 # Install the wheels
 RUN pip3 install -U /deps/h8l-wheels/*.whl
 RUN pip3 install -U /deps/hailo-wheels/*.whl
 
-RUN . /etc/environment && \
-    mv /usr/local/lib/python${PYTHON_VERSION}/dist-packages/hailo_platform/pyhailort/libhailort.so /usr/lib/${CC} && \
-    cd /usr/lib/${CC}/ && \
-    ln -s libhailort.so libhailort.so.4.18.0
-
 # Copy base files from the rootfs stage
 COPY --from=rootfs / /
 
-# Set environment variables for Hailo SDK
-ENV PATH="/opt/hailort/bin:${PATH}"
-ENV LD_LIBRARY_PATH="/usr/lib/$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu:${LD_LIBRARY_PATH}"
-
 # Set workdir
 WORKDIR /opt/frigate/
```
```diff
@@ -1,3 +1,9 @@
+target wget {
+  dockerfile = "docker/main/Dockerfile"
+  platforms = ["linux/arm64","linux/amd64"]
+  target = "wget"
+}
+
 target wheels {
   dockerfile = "docker/main/Dockerfile"
   platforms = ["linux/arm64","linux/amd64"]
@@ -19,6 +25,7 @@ target rootfs {
 target h8l {
   dockerfile = "docker/hailo8l/Dockerfile"
   contexts = {
+    wget = "target:wget"
     wheels = "target:wheels"
     deps = "target:deps"
    rootfs = "target:rootfs"
```
**docker/hailo8l/install_hailort.sh** (new executable file, 21 lines)

```diff
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+hailo_version="4.19.0"
+
+if [[ "${TARGETARCH}" == "amd64" ]]; then
+    arch="x86_64"
+elif [[ "${TARGETARCH}" == "arm64" ]]; then
+    arch="aarch64"
+fi
+
+mkdir -p /rootfs
+
+wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
+    tar -C /rootfs/ -xzf -
+
+mkdir -p /hailo-wheels
+
+wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"
+
```
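Note that the script reads `TARGETARCH`, which Docker supplies as a build argument; since `set -u` is in effect, running it by hand requires providing that variable explicitly:

```bash
# Sketch: manual invocation outside of a docker build.
TARGETARCH=amd64 ./docker/hailo8l/install_hailort.sh
```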
```diff
@@ -1,67 +0,0 @@
-import json
-import os
-import platform
-import sys
-import sysconfig
-
-
-def extract_toolchain_info(compiler):
-    # Remove the "-gcc" or "-g++" suffix if present
-    if compiler.endswith("-gcc") or compiler.endswith("-g++"):
-        compiler = compiler.rsplit("-", 1)[0]
-
-    # Extract the toolchain and ABI part (e.g., "gnu")
-    toolchain_parts = compiler.split("-")
-    abi_conventions = next(
-        (part for part in toolchain_parts if part in ["gnu", "musl", "eabi", "uclibc"]),
-        "",
-    )
-
-    return abi_conventions
-
-
-def generate_wheel_conf():
-    conf_file_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
-    )
-
-    # Extract current system and Python version information
-    py_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
-    arch = platform.machine()
-    system = platform.system().lower()
-    libc_version = platform.libc_ver()[1]
-
-    # Get the compiler information
-    compiler = sysconfig.get_config_var("CC")
-    abi_conventions = extract_toolchain_info(compiler)
-
-    # Create the new configuration data
-    new_conf_data = {
-        "py_version": py_version,
-        "arch": arch,
-        "system": system,
-        "libc_version": libc_version,
-        "abi": abi_conventions,
-        "extension": {
-            "posix": "so",
-            "nt": "pyd",  # Windows
-        }[os.name],
-    }
-
-    # If the file exists, load the existing data
-    if os.path.isfile(conf_file_path):
-        with open(conf_file_path, "r") as conf_file:
-            conf_data = json.load(conf_file)
-        # Update the existing data with the new data
-        conf_data.update(new_conf_data)
-    else:
-        # If the file does not exist, use the new data
-        conf_data = new_conf_data
-
-    # Write the updated data to the file
-    with open(conf_file_path, "w") as conf_file:
-        json.dump(conf_data, conf_file, indent=4)
-
-
-if __name__ == "__main__":
-    generate_wheel_conf()
```
```diff
@@ -1,111 +0,0 @@
-import json
-import os
-
-from setuptools import find_packages, setup
-from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
-
-
-class NonPurePythonBDistWheel(orig_bdist_wheel):
-    """Makes the wheel platform-dependent so it can be based on the _pyhailort architecture"""
-
-    def finalize_options(self):
-        orig_bdist_wheel.finalize_options(self)
-        self.root_is_pure = False
-
-
-def _get_hailort_lib_path():
-    lib_filename = "libhailort.so"
-    lib_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)),
-        f"hailo_platform/pyhailort/{lib_filename}",
-    )
-    if os.path.exists(lib_path):
-        print(f"Found libhailort shared library at: {lib_path}")
-    else:
-        print(f"Error: libhailort shared library not found at: {lib_path}")
-        raise FileNotFoundError(f"libhailort shared library not found at: {lib_path}")
-    return lib_path
-
-
-def _get_pyhailort_lib_path():
-    conf_file_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
-    )
-    if not os.path.isfile(conf_file_path):
-        raise FileNotFoundError(f"Configuration file not found: {conf_file_path}")
-
-    with open(conf_file_path, "r") as conf_file:
-        content = json.load(conf_file)
-        py_version = content["py_version"]
-        arch = content["arch"]
-        system = content["system"]
-        extension = content["extension"]
-        abi = content["abi"]
-
-        # Construct the filename directly
-        lib_filename = f"_pyhailort.cpython-{py_version.split('cp')[1]}-{arch}-{system}-{abi}.{extension}"
-        lib_path = os.path.join(
-            os.path.abspath(os.path.dirname(__file__)),
-            f"hailo_platform/pyhailort/{lib_filename}",
-        )
-
-        if os.path.exists(lib_path):
-            print(f"Found _pyhailort shared library at: {lib_path}")
-        else:
-            print(f"Error: _pyhailort shared library not found at: {lib_path}")
-            raise FileNotFoundError(
-                f"_pyhailort shared library not found at: {lib_path}"
-            )
-
-    return lib_path
-
-
-def _get_package_paths():
-    packages = []
-    pyhailort_lib = _get_pyhailort_lib_path()
-    hailort_lib = _get_hailort_lib_path()
-    if pyhailort_lib:
-        packages.append(pyhailort_lib)
-    if hailort_lib:
-        packages.append(hailort_lib)
-    packages.append(os.path.abspath("hailo_tutorials/notebooks/*"))
-    packages.append(os.path.abspath("hailo_tutorials/hefs/*"))
-    return packages
-
-
-if __name__ == "__main__":
-    setup(
-        author="Hailo team",
-        author_email="contact@hailo.ai",
-        cmdclass={
-            "bdist_wheel": NonPurePythonBDistWheel,
-        },
-        description="HailoRT",
-        entry_points={
-            "console_scripts": [
-                "hailo=hailo_platform.tools.hailocli.main:main",
-            ]
-        },
-        install_requires=[
-            "argcomplete",
-            "contextlib2",
-            "future",
-            "netaddr",
-            "netifaces",
-            "verboselogs",
-            "numpy==1.23.3",
-        ],
-        name="hailort",
-        package_data={
-            "hailo_platform": _get_package_paths(),
-        },
-        packages=find_packages(),
-        platforms=[
-            "linux_x86_64",
-            "linux_aarch64",
-            "win_amd64",
-        ],
-        url="https://hailo.ai/",
-        version="4.17.0",
-        zip_safe=False,
-    )
```
```diff
@@ -13,7 +13,7 @@ else
 fi
 
 # Clone the HailoRT driver repository
-git clone --depth 1 --branch v4.18.0 https://github.com/hailo-ai/hailort-drivers.git
+git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
 
 # Build and install the HailoRT driver
 cd hailort-drivers/linux/pcie
@@ -38,7 +38,7 @@ cd ../../
 if [ ! -d /lib/firmware/hailo ]; then
 sudo mkdir /lib/firmware/hailo
 fi
-sudo mv hailo8_fw.4.17.0.bin /lib/firmware/hailo/hailo8_fw.bin
+sudo mv hailo8_fw.*.bin /lib/firmware/hailo/hailo8_fw.bin
 
 # Install udev rules
 sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/
```
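After bumping to v4.19.0, one way to sanity-check that the rebuilt driver and firmware are active (the `hailo_pci` module name is an assumption based on the hailort-drivers repository):

```bash
# Sketch: verify the Hailo PCIe driver loaded after the rebuild.
lsmod | grep -i hailo
modinfo hailo_pci | grep -i '^version'
sudo dmesg | grep -i hailo | tail -n 5
```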
```diff
@@ -180,9 +180,6 @@ RUN /build_pysqlite3.sh
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
 
-COPY docker/main/requirements-wheels-post.txt /requirements-wheels-post.txt
-RUN pip3 wheel --no-deps --wheel-dir=/wheels-post -r /requirements-wheels-post.txt
-
 # Collect deps in a single layer
 FROM scratch AS deps-rootfs
@@ -214,6 +211,9 @@ ENV TOKENIZERS_PARALLELISM=true
 # https://github.com/huggingface/transformers/issues/27214
 ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 
+# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
+ENV OPENCV_FFMPEG_LOGLEVEL=8
+
 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
 ENV LIBAVFORMAT_VERSION_MAJOR=60
@@ -225,14 +225,6 @@ RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
 python3 -m pip install --upgrade pip && \
 pip3 install -U /deps/wheels/*.whl
 
-# We have to uninstall this dependency specifically
-# as it will break onnxruntime-openvino
-RUN pip3 uninstall -y onnxruntime
-
-RUN --mount=type=bind,from=wheels,source=/wheels-post,target=/deps/wheels \
-    python3 -m pip install --upgrade pip && \
-    pip3 install -U /deps/wheels/*.whl
-
 COPY --from=deps-rootfs / /
 
 RUN ldconfig
```
```diff
@@ -8,6 +8,7 @@ apt-get -qq install --no-install-recommends -y \
 apt-transport-https \
 gnupg \
 wget \
+lbzip2 \
 procps vainfo \
 unzip locales tzdata libxml2 xz-utils \
 python3.9 \
@@ -45,7 +46,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
 wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
 tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
 rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linux64-gpl-7.1.tar.xz"
+wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
 tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
 rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
 fi
@@ -57,7 +58,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
 wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
 tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
 rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linuxarm64-gpl-7.1.tar.xz"
+wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
 tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
 rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
 fi
@@ -76,6 +77,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
 apt-get -qq install --no-install-recommends --no-install-suggests -y \
 i965-va-driver-shaders
 
+# intel packages use zst compression so we need to update dpkg
+apt-get install -y dpkg
+
 rm -f /etc/apt/sources.list.d/debian-bookworm.list
 
 # use intel apt intel packages
@@ -83,8 +87,8 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
 echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
 apt-get -qq update
 apt-get -qq install --no-install-recommends --no-install-suggests -y \
-intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
-libmfx1 libmfxgen1 libvpl2
+intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
+libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
 
 rm -f /usr/share/keyrings/intel-graphics.gpg
 rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
```
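Pinning exact Intel package versions protects the image against repository updates; if a pin ever goes stale, the available candidates can be listed before updating it:

```bash
# Sketch: list versions currently offered by the configured apt repositories.
apt-cache madison intel-opencl-icd intel-media-va-driver-non-free libvpl2
```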
```diff
@@ -1,3 +0,0 @@
-# ONNX
-onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
-onnxruntime == 1.19.* ; platform_machine == 'aarch64'
```
```diff
@@ -1,9 +1,11 @@
 click == 8.1.*
 # FastAPI
+aiohttp == 3.11.2
+starlette == 0.41.2
 starlette-context == 0.3.6
-fastapi == 0.115.0
+fastapi == 0.115.*
 uvicorn == 0.30.*
-slowapi == 0.1.9
+slowapi == 0.1.*
 imutils == 0.5.*
 joserfc == 1.0.*
 pathvalidate == 3.2.*
@@ -16,10 +18,10 @@ paho-mqtt == 2.1.*
 pandas == 2.2.*
 peewee == 3.17.*
 peewee_migrate == 1.13.*
-psutil == 5.9.*
+psutil == 6.1.*
 pydantic == 2.8.*
 git+https://github.com/fbcotter/py3nvml#egg=py3nvml
-pytz == 2024.1
+pytz == 2024.*
 pyzmq == 26.2.*
 ruamel.yaml == 0.18.*
 tzlocal == 5.2
@@ -30,11 +32,12 @@ norfair == 2.2.*
 setproctitle == 1.3.*
 ws4py == 0.5.*
 unidecode == 1.3.*
-# OpenVino (ONNX installed in wheels-post)
+# OpenVino & ONNX
 openvino == 2024.3.*
+onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
+onnxruntime == 1.19.* ; platform_machine == 'aarch64'
 # Embeddings
 transformers == 4.45.*
-onnx_clip == 4.0.*
 # Generative AI
 google-generativeai == 0.8.*
 ollama == 0.3.*
```
```diff
@@ -165,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False):
     birdseye: dict[str, any] = config.get("birdseye")
 
     input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
-    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
+    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}"
 
     if go2rtc_config.get("streams"):
         go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
```
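The added `''` default guards the case where no `hwaccel_args` key exists; a one-liner shows the difference (hypothetical empty config dict):

```bash
# Sketch: .get() without a default yields None, which would otherwise
# leak into the assembled ffmpeg command string.
python3 -c "cfg = {}; print(repr(cfg.get('hwaccel_args')), repr(cfg.get('hwaccel_args', '')))"
```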
```diff
@@ -41,7 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
 && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
 
 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
+ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
+
+RUN pip3 uninstall -y onnxruntime-openvino \
+    && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
+    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
 
 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND
```
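Since the pinned Jetson wheel replaces the `onnxruntime-openvino` flavor, a quick check that the GPU build is the one actually importable (run inside the built image) might be:

```bash
# Sketch: confirm the installed onnxruntime version and its execution providers.
python3 -c "import onnxruntime as ort; print(ort.__version__, ort.get_available_providers())"
```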
```diff
@@ -25,7 +25,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
 COPY docker/tensorrt/detector/rootfs/ /
-ENV YOLO_MODELS="yolov7-320"
+ENV YOLO_MODELS=""
 
 HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
```
```diff
@@ -11,6 +11,7 @@ set -o errexit -o nounset -o pipefail
 MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
 TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
 OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
+YOLO_MODELS=${YOLO_MODELS:-""}
 
 # Create output folder
 mkdir -p ${OUTPUT_FOLDER}
@@ -19,6 +20,11 @@ FIRST_MODEL=true
 MODEL_DOWNLOAD=""
 MODEL_CONVERT=""
 
+if [ -z "$YOLO_MODELS"]; then
+    echo "tensorrt model preparation disabled"
+    exit 0
+fi
+
 for model in ${YOLO_MODELS//,/ }
 do
     # Remove old link in case path/version changed
```
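Model generation is now opt-in: an empty `YOLO_MODELS` short-circuits the script. Note the guard is missing a space before the closing `]`; bash only parses it cleanly when the variable is empty, and with a non-empty value the test reports an error and evaluates false, so the loop still runs. Re-enabling the previous default would look something like this (image name and tag are illustrative):

```bash
# Sketch: opt back in to TensorRT model preparation at container start.
docker run -d -e YOLO_MODELS="yolov7-320" ghcr.io/blakeblackshear/frigate:stable-tensorrt
```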
```diff
@@ -9,6 +9,6 @@ nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
 nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
 nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
 nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
-onnx==1.14.0; platform_machine == 'x86_64'
-onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
+onnx==1.16.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
```
````diff
@@ -181,7 +181,7 @@ go2rtc:
       - rtspx://192.168.1.1:7441/abcdefghijk
 ```
 
-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp)
 
 In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
 
````
```diff
@@ -109,7 +109,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
 | Reolink E1 Zoom | ✅ | ❌ | |
 | Reolink RLC-823A 16x | ✅ | ❌ | |
 | Speco O8P32X | ✅ | ❌ | |
-| Sunba 405-D20X | ✅ | ❌ | |
+| Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on original, and 4k models. All models are suspected incompatable. |
 | Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
 | Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
 | Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |
```
````diff
@@ -3,9 +3,13 @@ id: genai
 title: Generative AI
 ---
 
-Generative AI can be used to automatically generate descriptions based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate by providing detailed text descriptions as a basis of the search query.
+Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
 
-Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
+:::info
+
+Semantic Search must be enabled to use Generative AI.
+
+:::
 
 ## Configuration
@@ -29,11 +33,21 @@ cameras:
 
 ## Ollama
 
-[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [docker container](https://hub.docker.com/r/ollama/ollama) available.
+:::warning
+
+Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical.
+
+:::
+
+[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.
+
+Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
+
+Parallel requests also come with some caveats. You will need to set OLLAMA_NUM_PARALLEL=1 and choose a OLLAMA_MAX_QUEUE and OLLAMA_MAX_LOADED_MODELS values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
 
 ### Supported Models
 
-You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`.
+You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
 
 :::note
@@ -48,7 +62,7 @@ genai:
   enabled: True
   provider: ollama
   base_url: http://localhost:11434
-  model: llava
+  model: llava:7b
 ```
 
 ## Google Gemini
@@ -122,12 +136,22 @@ genai:
   api_key: "{FRIGATE_OPENAI_API_KEY}"
 ```
 
+## Usage and Best Practices
+
+Frigate's thumbnail search excels at identifying specific details about tracked objects – for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate’s default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.
+
+While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context.
+
+### Using GenAI for notifications
+
+Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
+
 ## Custom Prompts
 
 Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
 
 ```
-Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.
+Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.
 ```
 
 :::tip
@@ -144,25 +168,25 @@ genai:
   provider: ollama
   base_url: http://localhost:11434
   model: llava
-  prompt: "Describe the {label} in these images from the {camera} security camera."
+  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
   object_prompts:
-    person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc)."
-    car: "Label the primary vehicle in these images with just the name of the company if it is a delivery vehicle, or the color make and model."
+    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
+    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
 ```
 
 Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
 
-Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction.
+Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
 
 ```yaml
 cameras:
   front_door:
     genai:
       use_snapshot: True
-      prompt: "Describe the {label} in these images from the {camera} security camera at the front door of a house, aimed outward toward the street."
+      prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
       object_prompts:
-        person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
-        cat: "Describe the cat in these images (color, size, tail). Indicate whether or not the cat is by the flower pots. If the cat is chasing a mouse, make up a name for the mouse."
+        person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
+        cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
       objects:
         - person
         - cat
````
@@ -92,10 +92,16 @@ motion:
  lightning_threshold: 0.8
```

:::warning

Some cameras, like doorbell cameras, may miss detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.

:::

:::note

The lightning threshold does not stop motion-based recordings from being saved.

:::

Large changes in motion, like PTZ moves and camera switches between Color and IR mode, should result in no motion detection. This is done via the `lightning_threshold` configuration, which is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

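As a minimal sketch (assuming, as with most settings in these docs, that motion options can also be set per camera), a doorbell camera could raise the threshold like this:

```yaml
cameras:
  doorbell:
    motion:
      # Raised above the 0.8 shown earlier so a person filling the frame
      # is less likely to trigger a motion recalibration
      lightning_threshold: 0.9
```
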
@@ -22,8 +22,8 @@ Frigate supports multiple different detectors that work on different types of hardware:

- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.

**Nvidia**

- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.

**Rockchip**

- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.

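For illustration, a hedged sketch of enabling the ONNX detector with a supported model (the model path here is a placeholder, not a bundled file):

```yaml
detectors:
  onnx:
    type: onnx

model:
  # Placeholder path; point this at a supported ONNX model of your own
  path: /config/yolo_nas_s.onnx
```
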
@@ -223,7 +223,7 @@ The model used for TensorRT must be preprocessed on the same hardware platform that they will run on.

The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is already mapped to a directory on the host, so `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.

By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.

If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.

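For example, the DLA variant mentioned above can be requested the same way (a sketch mirroring the compose fragment in the next hunk):

```yaml
frigate:
  environment:
    - YOLO_MODELS=yolov7-320-dla
```
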
@@ -264,7 +264,7 @@ An example `docker-compose.yml` fragment that converts the `yolov7-320` and `yolov7x-640` models:

```yml
frigate:
  environment:
    - YOLO_MODELS=yolov7-320,yolov7x-640
    - USE_FP16=false
```

@@ -415,6 +415,24 @@ Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

ONNX is an open format for building machine learning models. Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup, Frigate will automatically try to use a GPU if one is available.

:::info

If the correct build is used for your GPU, then the GPU will be detected and used automatically.

- **AMD**

  - ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.

- **Intel**

  - OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.

- **Nvidia**
  - Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
  - Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.

:::

:::tip

When using many cameras, one detector may not be enough to keep up. Multiple detectors can be defined, assuming GPU resources are available. An example configuration would be:

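The example itself sits outside this hunk; as a hedged sketch, two ONNX detector instances could look like this (the detector names are illustrative):

```yaml
detectors:
  onnx_0:
    type: onnx
  onnx_1:
    type: onnx
```
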
@@ -457,6 +475,7 @@ model:
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  input_tensor: nchw
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```

@@ -5,7 +5,7 @@ title: Available Objects

import labels from "../../../labelmap.txt";

Frigate includes the object labels listed below from the Google Coral test data.

Please note:

@@ -518,6 +518,9 @@ semantic_search:
  enabled: False
  # Optional: Re-index embeddings database from historical tracked objects (default: shown below)
  reindex: False
  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"

# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.

@@ -545,10 +548,12 @@ genai:

# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
go2rtc:

# Optional: Live stream configuration for WebUI.
# NOTE: Can be overridden at the camera level
live:
  # Optional: Set the name of the stream configured in go2rtc
  # that should be used for live view in frigate WebUI. (default: name of camera)
  # NOTE: In most cases this should be set at the camera level only.
  stream_name: camera_name
  # Optional: Set the height of the jsmpeg stream. (default: 720)
  # This must be less than or equal to the height of the detect stream. Lower resolutions

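Per the note above, a minimal sketch of setting the stream name at the camera level (the camera and stream names are illustrative):

```yaml
cameras:
  front_door:
    live:
      stream_name: front_door_hd
```
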
@@ -7,7 +7,7 @@ title: Restream

Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied directly from the original stream to avoid re-encoding, and it does not include any annotations drawn by Frigate.

Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.

:::note

@@ -134,7 +134,7 @@ cameras:

## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

NOTE: The output will need to be passed with two curly braces `{{output}}`.

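The full example lives outside this hunk; as a hedged sketch of the usual shape (the source URL and stream name are placeholders):

```yaml
go2rtc:
  streams:
    # Note the doubled curly braces around output, per the note above
    exec_cam: "exec:ffmpeg -i rtsp://192.168.1.10:554/stream -c copy -rtsp_transport tcp -f rtsp {{output}}"
```
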
@@ -5,13 +5,21 @@ title: Using Semantic Search

Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate has support for [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create embeddings, which runs locally. Embeddings are then saved to Frigate's database.

Semantic Search is accessed via the _Explore_ view in the Frigate UI.

## Minimum System Requirements

Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably, or at all.

A minimum of 8GB of RAM is required to use Semantic Search. A GPU is not strictly required but will provide a significant performance increase over CPU-only systems.

For best performance, 16GB or more of RAM and a dedicated GPU are recommended.

## Configuration

Semantic Search is disabled by default and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.

```yaml
semantic_search:

@@ -27,18 +35,58 @@ If you are enabling the Search feature for the first time, be advised that Frigate

:::

### Jina AI CLIP

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page by clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted CLIP models are available and can be selected by setting the `model_size` config option to `small` or `large`:

```yaml
semantic_search:
  enabled: True
  model_size: small
```

- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the model that uses less RAM and runs on CPU, with a negligible difference in embedding quality.

### GPU Acceleration

The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.

```yaml
semantic_search:
  enabled: True
  model_size: large
```

:::info

If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.

**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.

- **AMD**

  - ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.

- **Intel**

  - OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.

- **Nvidia**
  - Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
  - Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.

:::

## Usage and Best Practices

1. Semantic Search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and Semantic Search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you.

|
|||||||
|
|
||||||
```console
|
```console
|
||||||
# Replace <width> and <height>
|
# Replace <width> and <height>
|
||||||
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 10 + 270480) / 1048576))'
|
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'
|
||||||
|
|
||||||
# Example for 1280x720
|
# Example for 1280x720, including logs
|
||||||
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 10 + 270480) / 1048576))'
|
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576)) + 40'
|
||||||
13.44MB
|
46.63MB
|
||||||
|
|
||||||
# Example for eight cameras detecting at 1280x720, including logs
|
# Example for eight cameras detecting at 1280x720, including logs
|
||||||
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 10 + 270480) / 1048576) * 8 + 40))'
|
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
|
||||||
136.99MB
|
253MB
|
||||||
```
|
```
|
||||||
|
|
||||||
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
||||||
@@ -194,7 +194,7 @@ services:
    privileged: true # this may not be necessary for all setups
    restart: unless-stopped
    image: ghcr.io/blakeblackshear/frigate:stable
    shm_size: "512mb" # update for your cameras based on the calculation above
    devices:
      - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux

@@ -250,10 +250,7 @@ The community supported docker image tags for the current stable version are:

- `stable-tensorrt-jp5` - Frigate build optimized for Nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for Nvidia Jetson devices running Jetpack 4.6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PCIe Raspberry Pi 5 hat

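For instance, selecting one of these tags in a compose file (a sketch; substitute the tag matching your hardware):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt
```
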
## Home Assistant Addon

@@ -13,7 +13,15 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras.

# Setup a go2rtc stream

First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.

:::tip

For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera.

See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information.

:::

```yaml
go2rtc:

@@ -39,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live stream.

- Check Video Codec:
  - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in the go2rtc documentation.
  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource-intensive task and you may be better off using the built-in jsmpeg view.

```yaml
go2rtc:
  streams:

@@ -306,7 +306,9 @@ By default, Frigate will retain video of all tracked objects for 10 days. The full

### Step 7: Complete config

At this point you have a complete config with basic functionality.

- See [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of frequently used setups.
- See the [full config reference](../configuration/reference.md) for a complete list of configuration options.

### Follow up

@@ -94,6 +94,18 @@ Message published for each changed tracked object. The first message is published
}
```

### `frigate/tracked_object_update`

Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.

```json
{
  "type": "description",
  "id": "1607123955.475377-mxklsc",
  "description": "The car is a red sedan moving away from the camera."
}
```

### `frigate/reviews`

Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, an `update` message is published with the same id. When the review activity has ended, a final `end` message is published.

@@ -5,7 +5,7 @@ title: Requesting your first model

## Step 1: Upload and annotate your images

Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify, the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.

It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.

@@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your model]

## Step 2: Submit a model request

Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.

![Plus Models Page](/img/plus/plus-models.jpg)

## Step 3: Set your model id in the config

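The section body continues beyond this hunk; as a hedged sketch of the usual shape (the id is a placeholder):

```yaml
model:
  # Replace with the model id shown on your Frigate+ Models page
  path: plus://<your_model_id>
```
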
@@ -3,7 +3,7 @@ id: improving_model
title: Improving your model
---

You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.

- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.

@@ -37,7 +37,7 @@ Misidentified objects should have a correct label added. For example, if a person

## Shortcuts for a faster workflow

| Shortcut Key      | Description                   |
| ----------------- | ----------------------------- |
| `?`               | Show all keyboard shortcuts   |
| `w`               | Add box                       |
| `d`               | Toggle difficult              |

@@ -47,7 +47,6 @@ Misidentified objects should have a correct label added. For example, if a person

| `esc`             | Deselect/Cancel               |
| `← ↑ → ↓`         | Move box                      |
| `Shift + ← ↑ → ↓` | Resize box                    |
| `scrollwheel`     | Zoom in/out                   |
| `f`               | Hide/show all but current box |
| `spacebar`        | Verify and save               |

@@ -15,17 +15,36 @@ With a subscription, 12 model trainings per year are included. If you cancel your

Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).

## Available model types

There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).

Not all model types are supported by all detectors, so it's important to choose a model type that matches your detector, as shown in the table under [supported detector types](#supported-detector-types).

| Model Type  | Description                                                                                                                                  |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs.                             |
| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs.  |

## Supported detector types

Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors.

:::warning

Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.

:::

| Hardware                                                                                                                      | Recommended Detector Type | Recommended Model Type |
| ----------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended)                                                         | `cpu`                     | `mobiledet`            |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector)                                               | `edgetpu`                 | `mobiledet`            |
| [Intel](/configuration/object_detectors.md#openvino-detector)                                                                  | `openvino`                | `yolonas`              |
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\*                     | `onnx`                    | `yolonas`              |
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\*   | `rocm`                    | `yolonas`              |

_\* Requires Frigate 0.15_

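As a concrete sketch of one row of this table, pairing a USB Coral with the recommended `edgetpu` detector (the `device` value is illustrative and depends on your form factor):

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb
```
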
## Available label types

@@ -49,7 +49,10 @@ The USB Coral can become stuck and need to be restarted, this can happen for a number of reasons.

## PCIe Coral Not Detected

The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on the OS and kernel being run.

- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For Ubuntu 22.04+, https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.

## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU

@@ -26,7 +26,7 @@ const sidebars: SidebarsConfig = {
    {
      type: 'link',
      label: 'Go2RTC Configuration Reference',
      href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
    } as PropSidebarItemLink,
  ],
  Detectors: [

docs/static/frigate-api.yaml (vendored, 480 lines changed)

@@ -7,7 +7,7 @@ info:

servers:
  - url: https://demo.frigate.video/api
  - url: http://localhost:5001/api

paths:
  /auth:

@@ -172,76 +172,65 @@ paths:
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Cameras
        - name: labels
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Labels
        - name: zones
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Zones
        - name: reviewed
          in: query
          required: false
          schema:
            type: integer
            default: 0
            title: Reviewed
        - name: limit
          in: query
          required: false
          schema:
            type: integer
            title: Limit
        - name: severity
          in: query
          required: false
          schema:
            allOf:
              - $ref: '#/components/schemas/SeverityEnum'
            title: Severity
        - name: before
          in: query
          required: false
          schema:
            type: number
            title: Before
        - name: after
          in: query
          required: false
          schema:
            type: number
            title: After
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/ReviewSegmentResponse'
                title: Response Review Review Get
        '422':
          description: Validation Error
          content:

@@ -259,36 +248,28 @@ paths:
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Cameras
        - name: labels
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Labels
        - name: zones
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Zones
        - name: timezone
          in: query
          required: false
          schema:
            type: string
            default: utc
            title: Timezone
      responses:

@@ -296,7 +277,8 @@ paths:
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ReviewSummaryResponse'
        '422':
          description: Validation Error
          content:

@@ -310,17 +292,18 @@ paths:
      summary: Set Multiple Reviewed
      operationId: set_multiple_reviewed_reviews_viewed_post
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ReviewModifyMultipleBody'
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/GenericResponse'
        '422':
          description: Validation Error
          content:

@@ -334,17 +317,18 @@ paths:
      summary: Delete Reviews
      operationId: delete_reviews_reviews_delete_post
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ReviewModifyMultipleBody'
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/GenericResponse'
        '422':
          description: Validation Error
          content:

@@ -363,96 +347,38 @@ paths:
          in: query
          required: false
          schema:
            type: string
            default: all
            title: Cameras
        - name: before
          in: query
          required: false
          schema:
            type: number
            title: Before
        - name: after
          in: query
          required: false
          schema:
            type: number
            title: After
        - name: scale
          in: query
          required: false
          schema:
            type: integer
            default: 30
            title: Scale
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/ReviewActivityMotionResponse'
                title: Response Motion Activity Review Activity Motion Get
        '422':
          description: Validation Error
          content:

@@ -477,57 +403,60 @@ paths:
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ReviewSegmentResponse'
        '422':
          description: Validation Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/HTTPValidationError'
  /review/{review_id}:
    get:
      tags:
        - Review
      summary: Get Review
      operationId: get_review_review__review_id__get
      parameters:
        - name: review_id
          in: path
          required: true
          schema:
            type: string
            title: Review Id
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ReviewSegmentResponse'
        '422':
          description: Validation Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/HTTPValidationError'
  /review/{review_id}/viewed:
    delete:
      tags:
        - Review
      summary: Set Not Reviewed
      operationId: set_not_reviewed_review__review_id__viewed_delete
      parameters:
        - name: review_id
          in: path
          required: true
          schema:
            type: string
            title: Review Id
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/GenericResponse'
        '422':
          description: Validation Error
          content:

@@ -763,13 +692,25 @@ paths:
           content:
             application/json:
               schema: { }
+  /nvinfo:
+    get:
+      tags:
+        - App
+      summary: Nvinfo
+      operationId: nvinfo_nvinfo_get
+      responses:
+        '200':
+          description: Successful Response
+          content:
+            application/json:
+              schema: { }
   /logs/{service}:
     get:
       tags:
         - App
         - Logs
       summary: Logs
-      description: Get logs for the requested service (frigate/nginx/go2rtc/chroma)
+      description: Get logs for the requested service (frigate/nginx/go2rtc)
       operationId: logs_logs__service__get
       parameters:
         - name: service
@@ -781,7 +722,6 @@ paths:
             - frigate
             - nginx
             - go2rtc
-            - chroma
           title: Service
         - name: download
           in: query
@@ -1042,7 +982,8 @@ paths:
         - Preview
       summary: Preview Hour
       description: Get all mp4 previews relevant for time period given the timezone
-      operationId: preview_hour_preview__year_month___day___hour___camera_name___tz_name__get
+      operationId: >-
+        preview_hour_preview__year_month___day___hour___camera_name___tz_name__get
       parameters:
         - name: year_month
           in: path
@@ -1092,7 +1033,8 @@ paths:
         - Preview
       summary: Get Preview Frames From Cache
       description: Get list of cached preview frames
-      operationId: get_preview_frames_from_cache_preview__camera_name__start__start_ts__end__end_ts__frames_get
+      operationId: >-
+        get_preview_frames_from_cache_preview__camera_name__start__start_ts__end__end_ts__frames_get
       parameters:
         - name: camera_name
           in: path
@@ -1177,7 +1119,8 @@ paths:
       tags:
         - Export
       summary: Export Recording
-      operationId: export_recording_export__camera_name__start__start_time__end__end_time__post
+      operationId: >-
+        export_recording_export__camera_name__start__start_time__end__end_time__post
       parameters:
         - name: camera_name
           in: path
@@ -1198,11 +1141,11 @@ paths:
             type: number
           title: End Time
       requestBody:
+        required: true
         content:
           application/json:
             schema:
-              type: object
-              title: Body
+              $ref: '#/components/schemas/ExportRecordingsBody'
       responses:
         '200':
           description: Successful Response
@@ -1465,6 +1408,14 @@ paths:
             anyOf:
               - type: number
               - type: 'null'
             title: Max Length
+        - name: event_id
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: string
+              - type: 'null'
+            title: Event Id
         - name: sort
           in: query
           required: false
@@ -1575,7 +1526,7 @@ paths:
             anyOf:
               - type: string
               - type: 'null'
-            default: thumbnail,description
+            default: thumbnail
             title: Search Type
         - name: include_thumbnails
           in: query
@@ -1647,6 +1598,22 @@ paths:
               - type: 'null'
             default: 00:00,24:00
             title: Time Range
+        - name: has_clip
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: boolean
+              - type: 'null'
+            title: Has Clip
+        - name: has_snapshot
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: boolean
+              - type: 'null'
+            title: Has Snapshot
         - name: timezone
           in: query
           required: false
@@ -1656,6 +1623,30 @@ paths:
               - type: 'null'
             default: utc
             title: Timezone
+        - name: min_score
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: number
+              - type: 'null'
+            title: Min Score
+        - name: max_score
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: number
+              - type: 'null'
+            title: Max Score
+        - name: sort
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - type: string
+              - type: 'null'
+            title: Sort
       responses:
         '200':
           description: Successful Response
@@ -1942,6 +1933,15 @@ paths:
           schema:
             type: string
           title: Event Id
+        - name: source
+          in: query
+          required: false
+          schema:
+            anyOf:
+              - $ref: '#/components/schemas/RegenerateDescriptionEnum'
+              - type: 'null'
+            default: thumbnails
+            title: Source
       responses:
         '200':
           description: Successful Response
@@ -2029,12 +2029,12 @@ paths:
         application/json:
           schema:
             $ref: '#/components/schemas/HTTPValidationError'
-  '{camera_name}':
+  /{camera_name}:
     get:
       tags:
         - Media
       summary: Mjpeg Feed
-      operationId: mjpeg_feed_camera_name__get
+      operationId: mjpeg_feed__camera_name__get
       parameters:
         - name: camera_name
           in: path
@@ -2241,7 +2241,8 @@ paths:
       tags:
         - Media
       summary: Get Snapshot From Recording
-      operationId: get_snapshot_from_recording__camera_name__recordings__frame_time__snapshot__format__get
+      operationId: >-
+        get_snapshot_from_recording__camera_name__recordings__frame_time__snapshot__format__get
       parameters:
         - name: camera_name
           in: path
@@ -2363,7 +2364,9 @@ paths:
       tags:
         - Media
       summary: Recordings
-      description: Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used
+      description: >-
+        Return specific camera recordings between the given 'after'/'end' times.
+        If not provided the last hour will be used
       operationId: recordings__camera_name__recordings_get
       parameters:
         - name: camera_name
@@ -2377,14 +2380,14 @@ paths:
           required: false
           schema:
             type: number
-            default: 1727542549.303557
+            default: 1731275308.238304
           title: After
         - name: before
           in: query
           required: false
           schema:
             type: number
-            default: 1727546149.303926
+            default: 1731278908.238313
           title: Before
       responses:
         '200':
@@ -2423,13 +2426,6 @@ paths:
           schema:
             type: number
           title: End Ts
-        - name: download
-          in: query
-          required: false
-          schema:
-            type: boolean
-            default: false
-          title: Download
       responses:
         '200':
           description: Successful Response
@@ -2800,13 +2796,6 @@ paths:
           schema:
             type: string
           title: Event Id
-        - name: download
-          in: query
-          required: false
-          schema:
-            type: boolean
-            default: false
-          title: Download
       responses:
         '200':
           description: Successful Response
@@ -3121,7 +3110,9 @@ paths:
       tags:
         - Media
       summary: Label Snapshot
-      description: Returns the snapshot image from the latest event for the given camera and label combo
+      description: >-
+        Returns the snapshot image from the latest event for the given camera
+        and label combo
      operationId: label_snapshot__camera_name___label__snapshot_jpg_get
      parameters:
        - name: camera_name
@@ -3193,6 +3184,32 @@ components:
       required:
         - password
       title: AppPutPasswordBody
+    DayReview:
+      properties:
+        day:
+          type: string
+          format: date-time
+          title: Day
+        reviewed_alert:
+          type: integer
+          title: Reviewed Alert
+        reviewed_detection:
+          type: integer
+          title: Reviewed Detection
+        total_alert:
+          type: integer
+          title: Total Alert
+        total_detection:
+          type: integer
+          title: Total Detection
+      type: object
+      required:
+        - day
+        - reviewed_alert
+        - reviewed_detection
+        - total_alert
+        - total_detection
+      title: DayReview
     EventsCreateBody:
       properties:
         source_type:
@@ -3237,7 +3254,6 @@ components:
         description:
           anyOf:
             - type: string
-              minLength: 1
            - type: 'null'
          title: The description of the event
      type: object
@@ -3270,6 +3286,27 @@ components:
       required:
         - subLabel
       title: EventsSubLabelBody
+    ExportRecordingsBody:
+      properties:
+        playback:
+          allOf:
+            - $ref: '#/components/schemas/PlaybackFactorEnum'
+          title: Playback factor
+          default: realtime
+        source:
+          allOf:
+            - $ref: '#/components/schemas/PlaybackSourceEnum'
+          title: Playback source
+          default: recordings
+        name:
+          type: string
+          maxLength: 256
+          title: Friendly name
+        image_path:
+          type: string
+          title: Image Path
+      type: object
+      title: ExportRecordingsBody
     Extension:
       type: string
       enum:
@@ -3278,6 +3315,19 @@ components:
         - jpg
         - jpeg
       title: Extension
+    GenericResponse:
+      properties:
+        success:
+          type: boolean
+          title: Success
+        message:
+          type: string
+          title: Message
+      type: object
+      required:
+        - success
+        - message
+      title: GenericResponse
     HTTPValidationError:
       properties:
         detail:
@@ -3287,6 +3337,132 @@ components:
           title: Detail
       type: object
       title: HTTPValidationError
+    Last24HoursReview:
+      properties:
+        reviewed_alert:
+          type: integer
+          title: Reviewed Alert
+        reviewed_detection:
+          type: integer
+          title: Reviewed Detection
+        total_alert:
+          type: integer
+          title: Total Alert
+        total_detection:
+          type: integer
+          title: Total Detection
+      type: object
+      required:
+        - reviewed_alert
+        - reviewed_detection
+        - total_alert
+        - total_detection
+      title: Last24HoursReview
+    PlaybackFactorEnum:
+      type: string
+      enum:
+        - realtime
+        - timelapse_25x
+      title: PlaybackFactorEnum
+    PlaybackSourceEnum:
+      type: string
+      enum:
+        - recordings
+        - preview
+      title: PlaybackSourceEnum
+    RegenerateDescriptionEnum:
+      type: string
+      enum:
+        - thumbnails
+        - snapshot
+      title: RegenerateDescriptionEnum
+    ReviewActivityMotionResponse:
+      properties:
+        start_time:
+          type: integer
+          title: Start Time
+        motion:
+          type: number
+          title: Motion
+        camera:
+          type: string
+          title: Camera
+      type: object
+      required:
+        - start_time
+        - motion
+        - camera
+      title: ReviewActivityMotionResponse
+    ReviewModifyMultipleBody:
+      properties:
+        ids:
+          items:
+            type: string
+            minLength: 1
+          type: array
+          minItems: 1
+          title: Ids
+      type: object
+      required:
+        - ids
+      title: ReviewModifyMultipleBody
+    ReviewSegmentResponse:
+      properties:
+        id:
+          type: string
+          title: Id
+        camera:
+          type: string
+          title: Camera
+        start_time:
+          type: string
+          format: date-time
+          title: Start Time
+        end_time:
+          type: string
+          format: date-time
+          title: End Time
+        has_been_reviewed:
+          type: boolean
+          title: Has Been Reviewed
+        severity:
+          $ref: '#/components/schemas/SeverityEnum'
+        thumb_path:
+          type: string
+          title: Thumb Path
+        data:
+          title: Data
+      type: object
+      required:
+        - id
+        - camera
+        - start_time
+        - end_time
+        - has_been_reviewed
+        - severity
+        - thumb_path
+        - data
+      title: ReviewSegmentResponse
+    ReviewSummaryResponse:
+      properties:
+        last24Hours:
+          $ref: '#/components/schemas/Last24HoursReview'
+        root:
+          additionalProperties:
+            $ref: '#/components/schemas/DayReview'
+          type: object
+          title: Root
+      type: object
+      required:
+        - last24Hours
+        - root
+      title: ReviewSummaryResponse
+    SeverityEnum:
+      type: string
+      enum:
+        - alert
+        - detection
+      title: SeverityEnum
     SubmitPlusBody:
       properties:
         include_annotation:
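Note: the /review path parameter rename (event_id to review_id) above is a breaking change for anything that consumed the old operation ids or built URLs by hand. A minimal client sketch against the new paths; the base URL and review id are placeholders, not values from this diff:

import requests

BASE = "http://frigate.local:5000/api"  # placeholder base URL
review_id = "1731275308.123456-abcdef"  # hypothetical review segment id

# single review segment; the body now conforms to ReviewSegmentResponse
segment = requests.get(f"{BASE}/review/{review_id}").json()
print(segment["severity"], segment["has_been_reviewed"])

# mark the segment as not reviewed; the body follows GenericResponse
result = requests.delete(f"{BASE}/review/{review_id}/viewed").json()
assert result["success"] is True, result["message"]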
@@ -357,6 +357,7 @@ def create_user(request: Request, body: AppPostUsersBody):
         {
             User.username: body.username,
             User.password_hash: password_hash,
+            User.notification_tokens: [],
         }
     ).execute()
     return JSONResponse(content={"username": body.username})

frigate/api/defs/__init__.py (new file, empty)

@@ -11,9 +11,7 @@ class EventsSubLabelBody(BaseModel):


 class EventsDescriptionBody(BaseModel):
-    description: Union[str, None] = Field(
-        title="The description of the event", min_length=1
-    )
+    description: Union[str, None] = Field(title="The description of the event")


 class EventsCreateBody(BaseModel):
@@ -28,6 +28,7 @@ class EventsQueryParams(BaseModel):
     is_submitted: Optional[int] = None
     min_length: Optional[float] = None
     max_length: Optional[float] = None
+    event_id: Optional[str] = None
     sort: Optional[str] = None
     timezone: Optional[str] = "utc"

@@ -35,7 +36,7 @@ class EventsQueryParams(BaseModel):
 class EventsSearchQueryParams(BaseModel):
     query: Optional[str] = None
     event_id: Optional[str] = None
-    search_type: Optional[str] = "thumbnail,description"
+    search_type: Optional[str] = "thumbnail"
     include_thumbnails: Optional[int] = 1
     limit: Optional[int] = 50
     cameras: Optional[str] = "all"
@@ -44,7 +45,13 @@ class EventsSearchQueryParams(BaseModel):
     after: Optional[float] = None
     before: Optional[float] = None
     time_range: Optional[str] = DEFAULT_TIME_RANGE
+    has_clip: Optional[bool] = None
+    has_snapshot: Optional[bool] = None
+    is_submitted: Optional[bool] = None
     timezone: Optional[str] = "utc"
+    min_score: Optional[float] = None
+    max_score: Optional[float] = None
+    sort: Optional[str] = None


 class EventsSummaryQueryParams(BaseModel):
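Note: FastAPI fills a params model like this from the query string when the route declares it with Depends(). A self-contained sketch of the pattern; the model below only mirrors the shape of the new fields and is not frigate code:

from typing import Optional

from fastapi import Depends, FastAPI
from pydantic import BaseModel

app = FastAPI()


class SearchQueryParams(BaseModel):
    # mirrors a few of the fields added above
    query: Optional[str] = None
    min_score: Optional[float] = None
    max_score: Optional[float] = None
    sort: Optional[str] = None


@app.get("/search")
def search(params: SearchQueryParams = Depends()):
    # GET /search?min_score=0.5&sort=score_desc populates the model
    return params.model_dump()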
frigate/api/defs/generic_response.py (new file, 6 additions)
@@ -0,0 +1,6 @@
+from pydantic import BaseModel
+
+
+class GenericResponse(BaseModel):
+    success: bool
+    message: str

frigate/api/defs/request/__init__.py (new file, empty)

frigate/api/defs/request/export_recordings_body.py (new file, 20 additions)
@@ -0,0 +1,20 @@
+from typing import Union
+
+from pydantic import BaseModel, Field
+from pydantic.json_schema import SkipJsonSchema
+
+from frigate.record.export import (
+    PlaybackFactorEnum,
+    PlaybackSourceEnum,
+)
+
+
+class ExportRecordingsBody(BaseModel):
+    playback: PlaybackFactorEnum = Field(
+        default=PlaybackFactorEnum.realtime, title="Playback factor"
+    )
+    source: PlaybackSourceEnum = Field(
+        default=PlaybackSourceEnum.recordings, title="Playback source"
+    )
+    name: str = Field(title="Friendly name", default=None, max_length=256)
+    image_path: Union[str, SkipJsonSchema[None]] = None

frigate/api/defs/review_body.py (new file, 6 additions)
@@ -0,0 +1,6 @@
+from pydantic import BaseModel, conlist, constr
+
+
+class ReviewModifyMultipleBody(BaseModel):
+    # List of string with at least one element and each element with at least one char
+    ids: conlist(constr(min_length=1), min_length=1)
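Note: a quick illustration of what ExportRecordingsBody accepts and rejects. The enums are stand-ins defined locally so the snippet runs on its own; the real model imports them from frigate.record.export:

from enum import Enum
from typing import Union

from pydantic import BaseModel, Field, ValidationError
from pydantic.json_schema import SkipJsonSchema


class PlaybackFactorEnum(str, Enum):  # stand-in for frigate.record.export
    realtime = "realtime"
    timelapse_25x = "timelapse_25x"


class PlaybackSourceEnum(str, Enum):  # stand-in for frigate.record.export
    recordings = "recordings"
    preview = "preview"


class ExportRecordingsBody(BaseModel):
    playback: PlaybackFactorEnum = Field(default=PlaybackFactorEnum.realtime)
    source: PlaybackSourceEnum = Field(default=PlaybackSourceEnum.recordings)
    name: str = Field(default=None, max_length=256)
    # SkipJsonSchema[None] keeps the null branch out of the OpenAPI schema
    image_path: Union[str, SkipJsonSchema[None]] = None


body = ExportRecordingsBody(playback="timelapse_25x", name="front door")
print(body.source)  # PlaybackSourceEnum.recordings (the default)

try:
    ExportRecordingsBody(name="x" * 300)  # exceeds max_length=256
except ValidationError as err:
    print(err.errors()[0]["type"])  # string_too_long

This replaces the hand-rolled length check removed further down with a 422 produced by validation before the handler runs.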
@@ -1,28 +1,31 @@
-from typing import Optional
+from typing import Union

 from pydantic import BaseModel
+from pydantic.json_schema import SkipJsonSchema
+
+from frigate.review.maintainer import SeverityEnum


 class ReviewQueryParams(BaseModel):
-    cameras: Optional[str] = "all"
-    labels: Optional[str] = "all"
-    zones: Optional[str] = "all"
-    reviewed: Optional[int] = 0
-    limit: Optional[int] = None
-    severity: Optional[str] = None
-    before: Optional[float] = None
-    after: Optional[float] = None
+    cameras: str = "all"
+    labels: str = "all"
+    zones: str = "all"
+    reviewed: int = 0
+    limit: Union[int, SkipJsonSchema[None]] = None
+    severity: Union[SeverityEnum, SkipJsonSchema[None]] = None
+    before: Union[float, SkipJsonSchema[None]] = None
+    after: Union[float, SkipJsonSchema[None]] = None


 class ReviewSummaryQueryParams(BaseModel):
-    cameras: Optional[str] = "all"
-    labels: Optional[str] = "all"
-    zones: Optional[str] = "all"
-    timezone: Optional[str] = "utc"
+    cameras: str = "all"
+    labels: str = "all"
+    zones: str = "all"
+    timezone: str = "utc"


 class ReviewActivityMotionQueryParams(BaseModel):
-    cameras: Optional[str] = "all"
-    before: Optional[float] = None
-    after: Optional[float] = None
-    scale: Optional[int] = 30
+    cameras: str = "all"
+    before: Union[float, SkipJsonSchema[None]] = None
+    after: Union[float, SkipJsonSchema[None]] = None
+    scale: int = 30
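Note: the switch from Optional[T] to Union[T, SkipJsonSchema[None]] is what removes the anyOf [..., 'null'] wrappers from the generated spec while keeping None as a valid default. A minimal demonstration with plain pydantic, not frigate code:

import json
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class WithOptional(BaseModel):
    before: Optional[float] = None


class WithSkip(BaseModel):
    before: Union[float, SkipJsonSchema[None]] = None


# Optional[float] renders as anyOf: [number, 'null']
print(json.dumps(WithOptional.model_json_schema()["properties"]["before"]))
# SkipJsonSchema[None] collapses it to a plain number
print(json.dumps(WithSkip.model_json_schema()["properties"]["before"]))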
frigate/api/defs/review_responses.py (new file, 43 additions)
@@ -0,0 +1,43 @@
+from datetime import datetime
+from typing import Dict
+
+from pydantic import BaseModel, Json
+
+from frigate.review.maintainer import SeverityEnum
+
+
+class ReviewSegmentResponse(BaseModel):
+    id: str
+    camera: str
+    start_time: datetime
+    end_time: datetime
+    has_been_reviewed: bool
+    severity: SeverityEnum
+    thumb_path: str
+    data: Json
+
+
+class Last24HoursReview(BaseModel):
+    reviewed_alert: int
+    reviewed_detection: int
+    total_alert: int
+    total_detection: int
+
+
+class DayReview(BaseModel):
+    day: datetime
+    reviewed_alert: int
+    reviewed_detection: int
+    total_alert: int
+    total_detection: int
+
+
+class ReviewSummaryResponse(BaseModel):
+    last24Hours: Last24HoursReview
+    root: Dict[str, DayReview]
+
+
+class ReviewActivityMotionResponse(BaseModel):
+    start_time: int
+    motion: float
+    camera: str
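Note: these response models double as the component schemas that appeared earlier in the spec. A small round-trip check with an invented payload; the classes are re-declared in condensed form (DayReview via inheritance) so the snippet is self-contained:

from datetime import datetime
from typing import Dict

from pydantic import BaseModel


class Last24HoursReview(BaseModel):
    reviewed_alert: int
    reviewed_detection: int
    total_alert: int
    total_detection: int


class DayReview(Last24HoursReview):
    day: datetime


class ReviewSummaryResponse(BaseModel):
    last24Hours: Last24HoursReview
    root: Dict[str, DayReview]


payload = {  # invented sample data
    "last24Hours": {
        "reviewed_alert": 2,
        "reviewed_detection": 5,
        "total_alert": 3,
        "total_detection": 9,
    },
    "root": {
        "2024-11-10": {
            "day": "2024-11-10T00:00:00",
            "reviewed_alert": 1,
            "reviewed_detection": 2,
            "total_alert": 1,
            "total_detection": 4,
        }
    },
}
summary = ReviewSummaryResponse.model_validate(payload)
print(summary.root["2024-11-10"].day.date())  # 2024-11-10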
@@ -88,6 +88,7 @@ def events(params: EventsQueryParams = Depends()):
     is_submitted = params.is_submitted
     min_length = params.min_length
     max_length = params.max_length
+    event_id = params.event_id
     sort = params.sort

@@ -230,6 +231,9 @@ def events(params: EventsQueryParams = Depends()):
     elif is_submitted > 0:
         clauses.append((Event.plus_id != ""))

+    if event_id is not None:
+        clauses.append((Event.id == event_id))
+
     if len(clauses) == 0:
         clauses.append((True))
@@ -259,66 +263,61 @@ def events(params: EventsQueryParams = Depends()):

 @router.get("/events/explore")
 def events_explore(limit: int = 10):
-    subquery = Event.select(
-        Event.id,
-        Event.camera,
-        Event.label,
-        Event.zones,
-        Event.start_time,
-        Event.end_time,
-        Event.has_clip,
-        Event.has_snapshot,
-        Event.plus_id,
-        Event.retain_indefinitely,
-        Event.sub_label,
-        Event.top_score,
-        Event.false_positive,
-        Event.box,
-        Event.data,
-        fn.rank()
-        .over(partition_by=[Event.label], order_by=[Event.start_time.desc()])
-        .alias("rank"),
-        fn.COUNT(Event.id).over(partition_by=[Event.label]).alias("event_count"),
-    ).alias("subquery")
-
-    query = (
-        Event.select(
-            subquery.c.id,
-            subquery.c.camera,
-            subquery.c.label,
-            subquery.c.zones,
-            subquery.c.start_time,
-            subquery.c.end_time,
-            subquery.c.has_clip,
-            subquery.c.has_snapshot,
-            subquery.c.plus_id,
-            subquery.c.retain_indefinitely,
-            subquery.c.sub_label,
-            subquery.c.top_score,
-            subquery.c.false_positive,
-            subquery.c.box,
-            subquery.c.data,
-            subquery.c.event_count,
-        )
-        .from_(subquery)
-        .where(subquery.c.rank <= limit)
-        .order_by(subquery.c.event_count.desc(), subquery.c.start_time.desc())
-        .dicts()
-    )
-
-    events = list(query.iterator())
-
-    processed_events = [
-        {k: v for k, v in event.items() if k != "data"}
-        | {
-            "data": {
-                k: v
-                for k, v in event["data"].items()
-                if k in ["type", "score", "top_score", "description"]
-            }
-        }
-        for event in events
-    ]
+    # get distinct labels for all events
+    distinct_labels = Event.select(Event.label).distinct().order_by(Event.label)
+
+    label_counts = {}
+
+    def event_generator():
+        for label_obj in distinct_labels.iterator():
+            label = label_obj.label
+
+            # get most recent events for this label
+            label_events = (
+                Event.select()
+                .where(Event.label == label)
+                .order_by(Event.start_time.desc())
+                .limit(limit)
+                .iterator()
+            )
+
+            # count total events for this label
+            label_counts[label] = Event.select().where(Event.label == label).count()
+
+            yield from label_events
+
+    def process_events():
+        for event in event_generator():
+            processed_event = {
+                "id": event.id,
+                "camera": event.camera,
+                "label": event.label,
+                "zones": event.zones,
+                "start_time": event.start_time,
+                "end_time": event.end_time,
+                "has_clip": event.has_clip,
+                "has_snapshot": event.has_snapshot,
+                "plus_id": event.plus_id,
+                "retain_indefinitely": event.retain_indefinitely,
+                "sub_label": event.sub_label,
+                "top_score": event.top_score,
+                "false_positive": event.false_positive,
+                "box": event.box,
+                "data": {
+                    k: v
+                    for k, v in event.data.items()
+                    if k in ["type", "score", "top_score", "description"]
+                },
+                "event_count": label_counts[event.label],
+            }
+            yield processed_event
+
+    # convert iterator to list and sort
+    processed_events = sorted(
+        process_events(),
+        key=lambda x: (x["event_count"], x["start_time"]),
+        reverse=True,
+    )

     return JSONResponse(content=processed_events)
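Note: the rewrite trades one window-function query (rank() over a label partition) for a query per distinct label plus a count per label, which keeps peak memory flat at the cost of more round trips. The grouping it implements is easy to see in plain Python; the rows below are invented:

from collections import defaultdict

# invented sample rows: (label, start_time)
events = [
    ("person", 100), ("person", 90), ("person", 80),
    ("car", 95), ("car", 85), ("dog", 70),
]

limit = 2
by_label = defaultdict(list)
for label, start_time in events:
    by_label[label].append(start_time)

# most recent `limit` events per label, plus the per-label total
top_per_label = {
    label: {"recent": sorted(times, reverse=True)[:limit], "event_count": len(times)}
    for label, times in by_label.items()
}

# final ordering mirrors the endpoint: event_count, then recency, descending
ordering = sorted(
    top_per_label,
    key=lambda lbl: (top_per_label[lbl]["event_count"], max(top_per_label[lbl]["recent"])),
    reverse=True,
)
print(ordering)  # ['person', 'car', 'dog']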
@@ -348,6 +347,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     search_type = params.search_type
     include_thumbnails = params.include_thumbnails
     limit = params.limit
+    sort = params.sort

     # Filters
     cameras = params.cameras
@@ -355,7 +355,12 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     zones = params.zones
     after = params.after
     before = params.before
+    min_score = params.min_score
+    max_score = params.max_score
     time_range = params.time_range
+    has_clip = params.has_clip
+    has_snapshot = params.has_snapshot
+    is_submitted = params.is_submitted

     # for similarity search
     event_id = params.event_id
@@ -394,6 +399,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
         Event.end_time,
         Event.has_clip,
         Event.has_snapshot,
+        Event.top_score,
         Event.data,
         Event.plus_id,
         ReviewSegment.thumb_path,
@@ -430,6 +436,26 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     if before:
         event_filters.append((Event.start_time < before))

+    if has_clip is not None:
+        event_filters.append((Event.has_clip == has_clip))
+
+    if has_snapshot is not None:
+        event_filters.append((Event.has_snapshot == has_snapshot))
+
+    if is_submitted is not None:
+        if is_submitted == 0:
+            event_filters.append((Event.plus_id.is_null()))
+        elif is_submitted > 0:
+            event_filters.append((Event.plus_id != ""))
+
+    if min_score is not None and max_score is not None:
+        event_filters.append((Event.data["score"].between(min_score, max_score)))
+    else:
+        if min_score is not None:
+            event_filters.append((Event.data["score"] >= min_score))
+        if max_score is not None:
+            event_filters.append((Event.data["score"] <= max_score))
+
     if time_range != DEFAULT_TIME_RANGE:
         tz_name = params.timezone
         hour_modifier, minute_modifier, _ = get_tz_modifiers(tz_name)
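Note: the score clauses reproduce SQL BETWEEN semantics when both bounds are given and fall back to one-sided comparisons otherwise. The same predicate in plain Python (hypothetical helper, not frigate code):

def score_in_range(score, min_score=None, max_score=None):
    # BETWEEN is inclusive on both ends, matching the peewee .between() call
    if min_score is not None and max_score is not None:
        return min_score <= score <= max_score
    if min_score is not None:
        return score >= min_score
    if max_score is not None:
        return score <= max_score
    return True  # no bounds: everything matches


assert score_in_range(0.7, min_score=0.5, max_score=0.9)
assert not score_in_range(0.4, min_score=0.5)
assert score_in_range(0.4, max_score=0.5)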
@@ -472,13 +498,8 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
                 status_code=404,
             )

-        thumb_result = context.embeddings.search_thumbnail(search_event)
-        thumb_ids = dict(
-            zip(
-                [result[0] for result in thumb_result],
-                context.thumb_stats.normalize([result[1] for result in thumb_result]),
-            )
-        )
+        thumb_result = context.search_thumbnail(search_event)
+        thumb_ids = {result[0]: result[1] for result in thumb_result}
         search_results = {
             event_id: {"distance": distance, "source": "thumbnail"}
             for event_id, distance in thumb_ids.items()
@@ -486,15 +507,18 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     else:
         search_types = search_type.split(",")

+        # only save stats for multi-modal searches
+        save_stats = "thumbnail" in search_types and "description" in search_types
+
         if "thumbnail" in search_types:
-            thumb_result = context.embeddings.search_thumbnail(query)
-            thumb_ids = dict(
-                zip(
-                    [result[0] for result in thumb_result],
-                    context.thumb_stats.normalize(
-                        [result[1] for result in thumb_result]
-                    ),
-                )
-            )
+            thumb_result = context.search_thumbnail(query)
+            thumb_distances = context.thumb_stats.normalize(
+                [result[1] for result in thumb_result], save_stats
+            )
+
+            thumb_ids = dict(
+                zip([result[0] for result in thumb_result], thumb_distances)
+            )
             search_results.update(
                 {
@@ -504,13 +528,14 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
             )

         if "description" in search_types:
-            desc_result = context.embeddings.search_description(query)
-            desc_ids = dict(
-                zip(
-                    [result[0] for result in desc_result],
-                    context.desc_stats.normalize([result[1] for result in desc_result]),
-                )
-            )
+            desc_result = context.search_description(query)
+            desc_distances = context.desc_stats.normalize(
+                [result[1] for result in desc_result], save_stats
+            )
+
+            desc_ids = dict(zip([result[0] for result in desc_result], desc_distances))
+
             for event_id, distance in desc_ids.items():
                 if (
                     event_id not in search_results
@@ -555,10 +580,18 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())

         processed_events.append(processed_event)

-    # Sort by search distance if search_results are available, otherwise by start_time
+    # Sort by search distance if search_results are available, otherwise by start_time as default
     if search_results:
         processed_events.sort(key=lambda x: x.get("search_distance", float("inf")))
     else:
-        processed_events.sort(key=lambda x: x["start_time"], reverse=True)
+        if sort == "score_asc":
+            processed_events.sort(key=lambda x: x["score"])
+        elif sort == "score_desc":
+            processed_events.sort(key=lambda x: x["score"], reverse=True)
+        elif sort == "date_asc":
+            processed_events.sort(key=lambda x: x["start_time"])
+        else:
+            # "date_desc" default
+            processed_events.sort(key=lambda x: x["start_time"], reverse=True)

     # Limit the number of events returned
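Note: the branch ladder maps the sort value onto a key and direction; a table-driven equivalent, purely illustrative:

# (key, reverse) per supported sort value; "date_desc" is the fallback
SORTS = {
    "score_asc": (lambda e: e["score"], False),
    "score_desc": (lambda e: e["score"], True),
    "date_asc": (lambda e: e["start_time"], False),
    "date_desc": (lambda e: e["start_time"], True),
}


def sort_events(events, sort=None):
    key, reverse = SORTS.get(sort, SORTS["date_desc"])
    return sorted(events, key=key, reverse=reverse)


sample = [{"score": 0.9, "start_time": 2}, {"score": 0.4, "start_time": 5}]
print(sort_events(sample, "score_desc")[0]["score"])  # 0.9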
@@ -927,27 +960,19 @@ def set_description(

     new_description = body.description

-    if new_description is None or len(new_description) == 0:
-        return JSONResponse(
-            content=(
-                {
-                    "success": False,
-                    "message": "description cannot be empty",
-                }
-            ),
-            status_code=400,
-        )
-
     event.data["description"] = new_description
     event.save()

     # If semantic search is enabled, update the index
     if request.app.frigate_config.semantic_search.enabled:
         context: EmbeddingsContext = request.app.embeddings
-        context.embeddings.upsert_description(
-            event_id=event_id,
-            description=new_description,
-        )
+        if len(new_description) > 0:
+            context.update_description(
+                event_id,
+                new_description,
+            )
+        else:
+            context.db.delete_embeddings_description(event_ids=[event_id])

     response_message = (
         f"Event {event_id} description is now blank"
@@ -978,9 +1003,11 @@ def regenerate_description(
         status_code=404,
     )

+    camera_config = request.app.frigate_config.cameras[event.camera]
+
     if (
         request.app.frigate_config.semantic_search.enabled
-        and request.app.frigate_config.genai.enabled
+        and camera_config.genai.enabled
     ):
         request.app.event_metadata_updater.publish((event.id, params.source))

@@ -1001,7 +1028,7 @@ def regenerate_description(
         content=(
             {
                 "success": False,
-                "message": "Semantic search and generative AI are not enabled",
+                "message": "Semantic Search and Generative AI must be enabled to regenerate a description",
             }
         ),
         status_code=400,
@@ -1024,17 +1051,14 @@ def delete_event(request: Request, event_id: str):
     media.unlink(missing_ok=True)
     media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
     media.unlink(missing_ok=True)
-    if event.has_clip:
-        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
-        media.unlink(missing_ok=True)

     event.delete_instance()
     Timeline.delete().where(Timeline.source_id == event_id).execute()
     # If semantic search is enabled, update the index
     if request.app.frigate_config.semantic_search.enabled:
         context: EmbeddingsContext = request.app.embeddings
-        context.embeddings.delete_thumbnail(id=[event_id])
-        context.embeddings.delete_description(id=[event_id])
+        context.db.delete_embeddings_thumbnail(event_ids=[event_id])
+        context.db.delete_embeddings_description(event_ids=[event_id])
     return JSONResponse(
         content=({"success": True, "message": "Event " + event_id + " deleted"}),
         status_code=200,
@@ -4,17 +4,22 @@ import logging
 import random
 import string
 from pathlib import Path
-from typing import Optional

 import psutil
 from fastapi import APIRouter, Request
 from fastapi.responses import JSONResponse
 from peewee import DoesNotExist

+from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
 from frigate.api.defs.tags import Tags
 from frigate.const import EXPORT_DIR
-from frigate.models import Export, Recordings
-from frigate.record.export import PlaybackFactorEnum, RecordingExporter
+from frigate.models import Export, Previews, Recordings
+from frigate.record.export import (
+    PlaybackFactorEnum,
+    PlaybackSourceEnum,
+    RecordingExporter,
+)
+from frigate.util.builtin import is_current_hour

 logger = logging.getLogger(__name__)

@@ -33,7 +38,7 @@ def export_recording(
     camera_name: str,
     start_time: float,
     end_time: float,
-    body: dict = None,
+    body: ExportRecordingsBody,
 ):
     if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
         return JSONResponse(
@@ -43,24 +48,21 @@ def export_recording(
             status_code=404,
         )

-    json: dict[str, any] = body or {}
-    playback_factor = json.get("playback", "realtime")
-    friendly_name: Optional[str] = json.get("name")
-
-    if len(friendly_name or "") > 256:
-        return JSONResponse(
-            content=({"success": False, "message": "File name is too long."}),
-            status_code=401,
-        )
-
-    existing_image = json.get("image_path")
-
+    playback_factor = body.playback
+    playback_source = body.source
+    friendly_name = body.name
+    existing_image = body.image_path
+
+    if playback_source == "recordings":
         recordings_count = (
             Recordings.select()
             .where(
                 Recordings.start_time.between(start_time, end_time)
                 | Recordings.end_time.between(start_time, end_time)
-                | ((start_time > Recordings.start_time) & (end_time < Recordings.end_time))
+                | (
+                    (start_time > Recordings.start_time)
+                    & (end_time < Recordings.end_time)
+                )
             )
             .where(Recordings.camera == camera_name)
             .count()
@@ -73,6 +75,25 @@ def export_recording(
                 ),
                 status_code=400,
             )
+    else:
+        previews_count = (
+            Previews.select()
+            .where(
+                Previews.start_time.between(start_time, end_time)
+                | Previews.end_time.between(start_time, end_time)
+                | ((start_time > Previews.start_time) & (end_time < Previews.end_time))
+            )
+            .where(Previews.camera == camera_name)
+            .count()
+        )
+
+        if not is_current_hour(start_time) and previews_count <= 0:
+            return JSONResponse(
+                content=(
+                    {"success": False, "message": "No previews found for time range"}
+                ),
+                status_code=400,
+            )

     export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
     exporter = RecordingExporter(
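Note: with the typed body, malformed export requests now fail validation with a 422 before any recordings are touched. A client-side sketch; the base URL, camera, and timestamps are placeholders, and the path shape is inferred from the operationId above:

import requests

BASE = "http://frigate.local:5000/api"  # placeholder base URL
camera, start, end = "front_door", 1731275308.0, 1731278908.0  # placeholders

resp = requests.post(
    f"{BASE}/export/{camera}/start/{start}/end/{end}",
    json={
        "playback": "timelapse_25x",   # PlaybackFactorEnum
        "source": "preview",           # PlaybackSourceEnum: use preview frames
        "name": "driveway timelapse",  # friendly name, max 256 chars
    },
)
print(resp.status_code, resp.json())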
@@ -88,6 +109,11 @@ def export_recording(
             if playback_factor in PlaybackFactorEnum.__members__.values()
             else PlaybackFactorEnum.realtime
         ),
+        (
+            PlaybackSourceEnum[playback_source]
+            if playback_source in PlaybackSourceEnum.__members__.values()
+            else PlaybackSourceEnum.recordings
+        ),
     )
     exporter.start()
     return JSONResponse(
@@ -82,6 +82,10 @@ def create_fastapi_app(
         database.close()
         return response

+    @app.on_event("startup")
+    async def startup():
+        logger.info("FastAPI started")
+
     # Rate limiter (used for login endpoint)
     auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit or "")
     app.state.limiter = limiter
@@ -7,6 +7,7 @@ import os
 import subprocess as sp
 import time
 from datetime import datetime, timedelta, timezone
+from pathlib import Path as FilePath
 from urllib.parse import unquote

 import cv2
@@ -450,8 +451,27 @@ def recording_clip(
     camera_name: str,
     start_ts: float,
     end_ts: float,
-    download: bool = False,
 ):
+    def run_download(ffmpeg_cmd: list[str], file_path: str):
+        with sp.Popen(
+            ffmpeg_cmd,
+            stderr=sp.PIPE,
+            stdout=sp.PIPE,
+            text=False,
+        ) as ffmpeg:
+            while True:
+                data = ffmpeg.stdout.read(8192)
+                if data is not None and len(data) > 0:
+                    yield data
+                else:
+                    if ffmpeg.returncode and ffmpeg.returncode != 0:
+                        logger.error(
+                            f"Failed to generate clip, ffmpeg logs: {ffmpeg.stderr.read()}"
+                        )
+                    else:
+                        FilePath(file_path).unlink(missing_ok=True)
+                    break
+
     recordings = (
         Recordings.select(
             Recordings.path,
@@ -467,18 +487,18 @@ def recording_clip(
         .order_by(Recordings.start_time.asc())
     )

-    playlist_lines = []
-    clip: Recordings
-    for clip in recordings:
-        playlist_lines.append(f"file '{clip.path}'")
-        # if this is the starting clip, add an inpoint
-        if clip.start_time < start_ts:
-            playlist_lines.append(f"inpoint {int(start_ts - clip.start_time)}")
-        # if this is the ending clip, add an outpoint
-        if clip.end_time > end_ts:
-            playlist_lines.append(f"outpoint {int(end_ts - clip.start_time)}")
-
-    file_name = sanitize_filename(f"clip_{camera_name}_{start_ts}-{end_ts}.mp4")
+    file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
+    file_path = f"/tmp/cache/{file_name}"
+    with open(file_path, "w") as file:
+        clip: Recordings
+        for clip in recordings:
+            file.write(f"file '{clip.path}'\n")
+            # if this is the starting clip, add an inpoint
+            if clip.start_time < start_ts:
+                file.write(f"inpoint {int(start_ts - clip.start_time)}\n")
+            # if this is the ending clip, add an outpoint
+            if clip.end_time > end_ts:
+                file.write(f"outpoint {int(end_ts - clip.start_time)}\n")

     if len(file_name) > 1000:
         return JSONResponse(
@@ -489,11 +509,8 @@ def recording_clip(
             status_code=403,
         )

-    path = os.path.join(CLIPS_DIR, f"cache/{file_name}")
-
     config: FrigateConfig = request.app.frigate_config

-    if not os.path.exists(path):
     ffmpeg_cmd = [
         config.ffmpeg.ffmpeg_path,
         "-hide_banner",
@@ -505,51 +522,19 @@ def recording_clip(
         "-safe",
         "0",
         "-i",
-        "/dev/stdin",
+        file_path,
         "-c",
         "copy",
         "-movflags",
-        "+faststart",
-        path,
+        "frag_keyframe+empty_moov",
+        "-f",
+        "mp4",
+        "pipe:",
     ]
-    p = sp.run(
-        ffmpeg_cmd,
-        input="\n".join(playlist_lines),
-        encoding="ascii",
-        capture_output=True,
-    )
-
-    if p.returncode != 0:
-        logger.error(p.stderr)
-        return JSONResponse(
-            content={
-                "success": False,
-                "message": "Could not create clip from recordings",
-            },
-            status_code=500,
-        )
-    else:
-        logger.debug(
-            f"Ignoring subsequent request for {path} as it already exists in the cache."
-        )
-
-    headers = {
-        "Content-Description": "File Transfer",
-        "Cache-Control": "no-cache",
-        "Content-Type": "video/mp4",
-        "Content-Length": str(os.path.getsize(path)),
-        # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
-        "X-Accel-Redirect": f"/clips/cache/{file_name}",
-    }
-
-    if download:
-        headers["Content-Disposition"] = "attachment; filename=%s" % file_name
-
-    return FileResponse(
-        path,
+    return StreamingResponse(
+        run_download(ffmpeg_cmd, file_path),
         media_type="video/mp4",
-        filename=file_name,
-        headers=headers,
     )
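Note: the move from '+faststart' plus a cached file to 'frag_keyframe+empty_moov' on a pipe is what makes streaming possible: fragmented MP4 does not need a seekable output to finalize its index, so bytes can be sent as ffmpeg produces them. The essentials of the pattern, with a generic ffmpeg invocation rather than the exact frigate command:

import subprocess as sp


def stream_mp4(playlist_path: str):
    # concat demuxer + stream copy, remuxed as fragmented MP4 onto stdout
    cmd = [
        "ffmpeg", "-hide_banner", "-loglevel", "error",
        "-f", "concat", "-safe", "0", "-i", playlist_path,
        "-c", "copy",
        "-movflags", "frag_keyframe+empty_moov",  # no seek-back required
        "-f", "mp4", "pipe:",
    ]
    with sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) as proc:
        while chunk := proc.stdout.read(8192):
            yield chunk


# e.g. fastapi.responses.StreamingResponse(stream_mp4(path), media_type="video/mp4")
# sends the clip without holding the whole file in memory or on disk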
@@ -932,7 +917,7 @@ def grid_snapshot(
     ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])

     return Response(
-        jpg.tobytes,
+        jpg.tobytes(),
         media_type="image/jpeg",
         headers={"Cache-Control": "no-store"},
     )
@@ -1028,7 +1013,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False


 @router.get("/events/{event_id}/clip.mp4")
-def event_clip(request: Request, event_id: str, download: bool = False):
+def event_clip(request: Request, event_id: str):
     try:
         event: Event = Event.get(Event.id == event_id)
     except DoesNotExist:
@@ -1048,7 +1033,7 @@ def event_clip(request: Request, event_id: str):
         end_ts = (
             datetime.now().timestamp() if event.end_time is None else event.end_time
         )
-        return recording_clip(request, event.camera, event.start_time, end_ts, download)
+        return recording_clip(request, event.camera, event.start_time, end_ts)

     headers = {
         "Content-Description": "File Transfer",
@@ -1059,9 +1044,6 @@ def event_clip(request: Request, event_id: str):
         "X-Accel-Redirect": f"/clips/{file_name}",
     }

-    if download:
-        headers["Content-Disposition"] = "attachment; filename=%s" % file_name
-
     return FileResponse(
         clip_path,
         media_type="video/mp4",
@@ -1471,7 +1453,6 @@ def preview_thumbnail(file_name: str):

     return Response(
         jpg_bytes,
-        # FIXME: Shouldn't it be either jpg or webp depending on the endpoint?
         media_type="image/webp",
         headers={
             "Content-Type": "image/webp",
@@ -1500,7 +1481,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
     ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])

     return Response(
-        jpg.tobytes,
+        jpg.tobytes(),
         media_type="image/jpeg",
         headers={"Cache-Control": "no-store"},
     )
@@ -1546,13 +1527,13 @@ def label_snapshot(request: Request, camera_name: str, label: str):
     )

     try:
-        event = event_query.get()
-        return event_snapshot(request, event.id)
+        event: Event = event_query.get()
+        return event_snapshot(request, event.id, MediaEventsSnapshotQueryParams())
     except DoesNotExist:
         frame = np.zeros((720, 1280, 3), np.uint8)
-        ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+        _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])

         return Response(
-            jpg.tobytes,
+            jpg.tobytes(),
             media_type="image/jpeg",
         )
@@ -12,11 +12,18 @@ from fastapi.responses import JSONResponse
 from peewee import Case, DoesNotExist, fn, operator
 from playhouse.shortcuts import model_to_dict

+from frigate.api.defs.generic_response import GenericResponse
+from frigate.api.defs.review_body import ReviewModifyMultipleBody
 from frigate.api.defs.review_query_parameters import (
     ReviewActivityMotionQueryParams,
     ReviewQueryParams,
     ReviewSummaryQueryParams,
 )
+from frigate.api.defs.review_responses import (
+    ReviewActivityMotionResponse,
+    ReviewSegmentResponse,
+    ReviewSummaryResponse,
+)
 from frigate.api.defs.tags import Tags
 from frigate.models import Recordings, ReviewSegment
 from frigate.util.builtin import get_tz_modifiers
@@ -26,7 +33,7 @@ logger = logging.getLogger(__name__)
 router = APIRouter(tags=[Tags.review])


-@router.get("/review")
+@router.get("/review", response_model=list[ReviewSegmentResponse])
 def review(params: ReviewQueryParams = Depends()):
     cameras = params.cameras
     labels = params.labels
@@ -102,7 +109,7 @@ def review(params: ReviewQueryParams = Depends()):
     return JSONResponse(content=[r for r in review])


-@router.get("/review/summary")
+@router.get("/review/summary", response_model=ReviewSummaryResponse)
 def review_summary(params: ReviewSummaryQueryParams = Depends()):
     hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
     day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
@@ -173,18 +180,6 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
|
|||||||
0,
|
0,
|
||||||
)
|
)
|
||||||
).alias("reviewed_detection"),
|
).alias("reviewed_detection"),
|
||||||
fn.SUM(
|
|
||||||
Case(
|
|
||||||
None,
|
|
||||||
[
|
|
||||||
(
|
|
||||||
(ReviewSegment.severity == "significant_motion"),
|
|
||||||
ReviewSegment.has_been_reviewed,
|
|
||||||
)
|
|
||||||
],
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
).alias("reviewed_motion"),
|
|
||||||
fn.SUM(
|
fn.SUM(
|
||||||
Case(
|
Case(
|
||||||
None,
|
None,
|
||||||
@@ -209,18 +204,6 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
|
|||||||
0,
|
0,
|
||||||
)
|
)
|
||||||
).alias("total_detection"),
|
).alias("total_detection"),
|
||||||
fn.SUM(
|
|
||||||
Case(
|
|
||||||
None,
|
|
||||||
[
|
|
||||||
(
|
|
||||||
(ReviewSegment.severity == "significant_motion"),
|
|
||||||
1,
|
|
||||||
)
|
|
||||||
],
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
).alias("total_motion"),
|
|
||||||
)
|
)
|
||||||
.where(reduce(operator.and_, clauses))
|
.where(reduce(operator.and_, clauses))
|
||||||
.dicts()
|
.dicts()
|
||||||
@@ -282,18 +265,6 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
|
|||||||
0,
|
0,
|
||||||
)
|
)
|
||||||
).alias("reviewed_detection"),
|
).alias("reviewed_detection"),
|
||||||
fn.SUM(
|
|
||||||
Case(
|
|
||||||
None,
|
|
||||||
[
|
|
||||||
(
|
|
||||||
(ReviewSegment.severity == "significant_motion"),
|
|
||||||
ReviewSegment.has_been_reviewed,
|
|
||||||
)
|
|
||||||
],
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
).alias("reviewed_motion"),
|
|
||||||
fn.SUM(
|
fn.SUM(
|
||||||
Case(
|
Case(
|
||||||
None,
|
None,
|
||||||
@@ -318,18 +289,6 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
|
|||||||
0,
|
0,
|
||||||
)
|
)
|
||||||
).alias("total_detection"),
|
).alias("total_detection"),
|
||||||
fn.SUM(
|
|
||||||
Case(
|
|
||||||
None,
|
|
||||||
[
|
|
||||||
(
|
|
||||||
(ReviewSegment.severity == "significant_motion"),
|
|
||||||
1,
|
|
||||||
)
|
|
||||||
],
|
|
||||||
0,
|
|
||||||
)
|
|
||||||
).alias("total_motion"),
|
|
||||||
)
|
)
|
||||||
.where(reduce(operator.and_, clauses))
|
.where(reduce(operator.and_, clauses))
|
||||||
.group_by(
|
.group_by(
|
||||||
@@ -348,19 +307,10 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
|
|||||||
return JSONResponse(content=data)
|
return JSONResponse(content=data)
|
||||||
|
|
||||||
|
|
||||||
@router.post("/reviews/viewed")
|
@router.post("/reviews/viewed", response_model=GenericResponse)
|
||||||
def set_multiple_reviewed(body: dict = None):
|
def set_multiple_reviewed(body: ReviewModifyMultipleBody):
|
||||||
json: dict[str, any] = body or {}
|
|
||||||
list_of_ids = json.get("ids", "")
|
|
||||||
|
|
||||||
if not list_of_ids or len(list_of_ids) == 0:
|
|
||||||
return JSONResponse(
|
|
||||||
context=({"success": False, "message": "Not a valid list of ids"}),
|
|
||||||
status_code=404,
|
|
||||||
)
|
|
||||||
|
|
||||||
ReviewSegment.update(has_been_reviewed=True).where(
|
ReviewSegment.update(has_been_reviewed=True).where(
|
||||||
ReviewSegment.id << list_of_ids
|
ReviewSegment.id << body.ids
|
||||||
).execute()
|
).execute()
|
||||||
|
|
||||||
return JSONResponse(
|
return JSONResponse(
|
||||||
@@ -369,17 +319,9 @@ def set_multiple_reviewed(body: dict = None):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@router.post("/reviews/delete")
|
@router.post("/reviews/delete", response_model=GenericResponse)
|
||||||
def delete_reviews(body: dict = None):
|
def delete_reviews(body: ReviewModifyMultipleBody):
|
||||||
json: dict[str, any] = body or {}
|
list_of_ids = body.ids
|
||||||
list_of_ids = json.get("ids", "")
|
|
||||||
|
|
||||||
if not list_of_ids or len(list_of_ids) == 0:
|
|
||||||
return JSONResponse(
|
|
||||||
content=({"success": False, "message": "Not a valid list of ids"}),
|
|
||||||
status_code=404,
|
|
||||||
)
|
|
||||||
|
|
||||||
reviews = (
|
reviews = (
|
||||||
ReviewSegment.select(
|
ReviewSegment.select(
|
||||||
ReviewSegment.camera,
|
ReviewSegment.camera,
|
||||||
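For reference, the `ReviewModifyMultipleBody` and `GenericResponse` models the rewritten endpoints depend on are defined elsewhere and not shown in this diff. A minimal sketch of plausible definitions, where only the field names `ids`, `success`, and `message` are confirmed by the endpoint code above:

# Hypothetical sketch; the real definitions live in frigate/api/defs/ and may differ.
from pydantic import BaseModel


class ReviewModifyMultipleBody(BaseModel):
    ids: list[str]  # review segment ids to mark as viewed / delete


class GenericResponse(BaseModel):
    success: bool
    message: str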
@@ -424,7 +366,9 @@ def delete_reviews(body: dict = None):
     )
 
 
-@router.get("/review/activity/motion")
+@router.get(
+    "/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
+)
 def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
     """Get motion and audio activity."""
     cameras = params.cameras

@@ -498,98 +442,44 @@ def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
     return JSONResponse(content=normalized)
 
 
-@router.get("/review/activity/audio")
-def audio_activity(params: ReviewActivityMotionQueryParams = Depends()):
-    """Get motion and audio activity."""
-    cameras = params.cameras
-    before = params.before or datetime.datetime.now().timestamp()
-    after = (
-        params.after
-        or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
-    )
-    # get scale in seconds
-    scale = params.scale
-
-    clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
-
-    if cameras != "all":
-        camera_list = cameras.split(",")
-        clauses.append((Recordings.camera << camera_list))
-
-    all_recordings: list[Recordings] = (
-        Recordings.select(
-            Recordings.start_time,
-            Recordings.duration,
-            Recordings.objects,
-            Recordings.dBFS,
-        )
-        .where(reduce(operator.and_, clauses))
-        .order_by(Recordings.start_time.asc())
-        .iterator()
-    )
-
-    # format is: { timestamp: segment_start_ts, motion: [0-100], audio: [0 - -100] }
-    # periods where active objects / audio was detected will cause audio to be scaled down
-    data: list[dict[str, float]] = []
-
-    for rec in all_recordings:
-        data.append(
-            {
-                "start_time": rec.start_time,
-                "audio": rec.dBFS if rec.objects == 0 else 0,
-            }
-        )
-
-    # resample data using pandas to get activity on scaled basis
-    df = pd.DataFrame(data, columns=["start_time", "audio"])
-    df = df.astype(dtype={"audio": "float16"})
-
-    # set date as datetime index
-    df["start_time"] = pd.to_datetime(df["start_time"], unit="s")
-    df.set_index(["start_time"], inplace=True)
-
-    # normalize data
-    df = df.resample(f"{scale}S").mean().fillna(0.0)
-    df["audio"] = (
-        (df["audio"] - df["audio"].max())
-        / (df["audio"].min() - df["audio"].max())
-        * -100
-    )
-
-    # change types for output
-    df.index = df.index.astype(int) // (10**9)
-    normalized = df.reset_index().to_dict("records")
-    return JSONResponse(content=normalized)
-
-
-@router.get("/review/event/{event_id}")
+@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse)
 def get_review_from_event(event_id: str):
     try:
-        return model_to_dict(
-            ReviewSegment.get(
-                ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
+        return JSONResponse(
+            model_to_dict(
+                ReviewSegment.get(
+                    ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
+                )
             )
         )
     except DoesNotExist:
-        return "Review item not found", 404
+        return JSONResponse(
+            content={"success": False, "message": "Review item not found"},
+            status_code=404,
+        )
 
 
-@router.get("/review/{event_id}")
-def get_review(event_id: str):
+@router.get("/review/{review_id}", response_model=ReviewSegmentResponse)
+def get_review(review_id: str):
     try:
-        return model_to_dict(ReviewSegment.get(ReviewSegment.id == event_id))
+        return JSONResponse(
+            content=model_to_dict(ReviewSegment.get(ReviewSegment.id == review_id))
+        )
     except DoesNotExist:
-        return "Review item not found", 404
+        return JSONResponse(
+            content={"success": False, "message": "Review item not found"},
+            status_code=404,
+        )
 
 
-@router.delete("/review/{event_id}/viewed")
-def set_not_reviewed(event_id: str):
+@router.delete("/review/{review_id}/viewed", response_model=GenericResponse)
+def set_not_reviewed(review_id: str):
     try:
-        review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == event_id)
+        review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == review_id)
     except DoesNotExist:
         return JSONResponse(
             content=(
-                {"success": False, "message": "Review " + event_id + " not found"}
+                {"success": False, "message": "Review " + review_id + " not found"}
             ),
             status_code=404,
         )

@@ -598,6 +488,8 @@ def set_not_reviewed(event_id: str):
     review.save()
 
     return JSONResponse(
-        content=({"success": True, "message": "Reviewed " + event_id + " not viewed"}),
+        content=(
+            {"success": True, "message": "Set Review " + review_id + " as not viewed"}
+        ),
         status_code=200,
     )
@@ -68,6 +68,7 @@ from frigate.stats.util import stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
 from frigate.util.builtin import empty_and_close_queue
+from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
 from frigate.util.object import get_camera_regions_grid
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera

@@ -90,6 +91,7 @@ class FrigateApp:
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
         self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
+        self.frame_manager = SharedMemoryFrameManager()
         self.config = config
 
     def ensure_dirs(self) -> None:

@@ -325,20 +327,20 @@ class FrigateApp:
                     for det in self.config.detectors.values()
                 ]
             )
-            shm_in = mp.shared_memory.SharedMemory(
+            shm_in = UntrackedSharedMemory(
                 name=name,
                 create=True,
                 size=largest_frame,
             )
         except FileExistsError:
-            shm_in = mp.shared_memory.SharedMemory(name=name)
+            shm_in = UntrackedSharedMemory(name=name)
 
         try:
-            shm_out = mp.shared_memory.SharedMemory(
+            shm_out = UntrackedSharedMemory(
                 name=f"out-{name}", create=True, size=20 * 6 * 4
             )
         except FileExistsError:
-            shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
+            shm_out = UntrackedSharedMemory(name=f"out-{name}")
 
         self.detection_shms.append(shm_in)
         self.detection_shms.append(shm_out)
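`UntrackedSharedMemory` is imported from `frigate.util.image` but not defined in this diff. A hypothetical sketch of what it likely does, namely opting the segment out of multiprocessing's resource tracker so consumer processes don't unlink memory they only map:

# Hypothetical sketch; the real class lives in frigate/util/image.py and may differ.
from multiprocessing import resource_tracker, shared_memory


class UntrackedSharedMemory(shared_memory.SharedMemory):
    def __init__(self, name: str = None, create: bool = False, size: int = 0) -> None:
        super().__init__(name=name, create=create, size=size)
        # Undo the registration SharedMemory.__init__ performed so the
        # resource tracker never reclaims this segment behind our back
        # (a common CPython workaround; assumed here, not confirmed by the diff).
        resource_tracker.unregister(self._name, "shared_memory")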
@@ -431,6 +433,11 @@ class FrigateApp:
                 logger.info(f"Capture process not started for disabled camera {name}")
                 continue
 
+            # pre-create shms
+            for i in range(shm_frame_count):
+                frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
+                self.frame_manager.create(f"{config.name}{i}", frame_size)
+
             capture_process = util.Process(
                 target=capture_camera,
                 name=f"camera_capture:{name}",

@@ -513,15 +520,18 @@ class FrigateApp:
             1,
         )
 
-        shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))
+        if cam_total_frame_size == 0.0:
+            return 0
+
+        shm_frame_count = min(200, int(available_shm / (cam_total_frame_size)))
 
         logger.debug(
             f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
         )
 
-        if shm_frame_count < 10:
+        if shm_frame_count < 20:
             logger.warning(
-                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 10)}MB."
+                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
             )
 
         return shm_frame_count
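The new logic raises the per-camera frame cap from 50 to 200 and guards the zero-size case before dividing. A small worked example of the arithmetic; the numbers are illustrative, and the 1.5x factor is the YUV420 frame layout:

# Illustrative math only; real inputs depend on camera config and /dev/shm size.
def shm_frame_count(available_shm_mb: float, frame_mbs: list[float]) -> int:
    cam_total_frame_size = sum(frame_mbs)
    if cam_total_frame_size == 0.0:
        return 0  # no enabled cameras; avoid dividing by zero
    return min(200, int(available_shm_mb / cam_total_frame_size))


# Two 1920x1080 cameras: 1920 * 1080 * 1.5 bytes ~= 2.97 MB per YUV frame.
print(shm_frame_count(128.0, [2.97, 2.97]))  # -> 21 frames per camera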
@@ -581,12 +591,12 @@ class FrigateApp:
         self.init_recording_manager()
         self.init_review_segment_manager()
         self.init_go2rtc()
+        self.start_detectors()
+        self.init_embeddings_manager()
         self.bind_database()
         self.check_db_data_migrations()
         self.init_inter_process_communicator()
         self.init_dispatcher()
-        self.start_detectors()
-        self.init_embeddings_manager()
         self.init_embeddings_client()
         self.start_video_output_processor()
         self.start_ptz_autotracker()

@@ -699,7 +709,7 @@ class FrigateApp:
 
         # Save embeddings stats to disk
         if self.embeddings:
-            self.embeddings.save_stats()
+            self.embeddings.stop()
 
         # Stop Communicators
         self.inter_process_communicator.stop()

@@ -707,6 +717,7 @@ class FrigateApp:
         self.event_metadata_updater.stop()
         self.inter_zmq_proxy.stop()
 
+        self.frame_manager.cleanup()
         while len(self.detection_shms) > 0:
             shm = self.detection_shms.pop()
             shm.close()
@@ -15,13 +15,14 @@ from frigate.const import (
     INSERT_PREVIEW,
     REQUEST_REGION_GRID,
     UPDATE_CAMERA_ACTIVITY,
+    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
     UPDATE_EVENT_DESCRIPTION,
     UPDATE_MODEL_STATE,
     UPSERT_REVIEW_SEGMENT,
 )
 from frigate.models import Event, Previews, Recordings, ReviewSegment
 from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
-from frigate.types import ModelStatusTypesEnum
+from frigate.types import ModelStatusTypesEnum, TrackedObjectUpdateTypesEnum
 from frigate.util.object import get_camera_regions_grid
 from frigate.util.services import restart_frigate

@@ -63,6 +64,9 @@ class Dispatcher:
         self.onvif = onvif
         self.ptz_metrics = ptz_metrics
         self.comms = communicators
+        self.camera_activity = {}
+        self.model_state = {}
+        self.embeddings_reindex = {}
 
         self._camera_settings_handlers: dict[str, Callable] = {
             "audio": self._on_audio_command,

@@ -84,37 +88,25 @@ class Dispatcher:
         for comm in self.comms:
             comm.subscribe(self._receive)
 
-        self.camera_activity = {}
-        self.model_state = {}
-
     def _receive(self, topic: str, payload: str) -> Optional[Any]:
         """Handle receiving of payload from communicators."""
-        if topic.endswith("set"):
+
+        def handle_camera_command(command_type, camera_name, command, payload):
             try:
-                # example /cam_name/detect/set payload=ON|OFF
-                if topic.count("/") == 2:
-                    camera_name = topic.split("/")[-3]
-                    command = topic.split("/")[-2]
+                if command_type == "set":
                     self._camera_settings_handlers[command](camera_name, payload)
-                elif topic.count("/") == 1:
-                    command = topic.split("/")[-2]
-                    self._global_settings_handlers[command](payload)
-            except IndexError:
-                logger.error(f"Received invalid set command: {topic}")
-                return
-        elif topic.endswith("ptz"):
-            try:
-                # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
-                camera_name = topic.split("/")[-2]
+                elif command_type == "ptz":
                     self._on_ptz_command(camera_name, payload)
-            except IndexError:
-                logger.error(f"Received invalid ptz command: {topic}")
-                return
-        elif topic == "restart":
+            except KeyError:
+                logger.error(f"Invalid command type or handler: {command_type}")
+
+        def handle_restart():
             restart_frigate()
-        elif topic == INSERT_MANY_RECORDINGS:
+
+        def handle_insert_many_recordings():
             Recordings.insert_many(payload).execute()
-        elif topic == REQUEST_REGION_GRID:
+
+        def handle_request_region_grid():
             camera = payload
             grid = get_camera_regions_grid(
                 camera,

@@ -122,40 +114,63 @@ class Dispatcher:
                 max(self.config.model.width, self.config.model.height),
             )
             return grid
-        elif topic == INSERT_PREVIEW:
+
+        def handle_insert_preview():
             Previews.insert(payload).execute()
-        elif topic == UPSERT_REVIEW_SEGMENT:
-            (
-                ReviewSegment.insert(payload)
-                .on_conflict(
-                    conflict_target=[ReviewSegment.id],
-                    update=payload,
-                )
-                .execute()
-            )
-        elif topic == CLEAR_ONGOING_REVIEW_SEGMENTS:
-            ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
-                ReviewSegment.end_time == None
-            ).execute()
-        elif topic == UPDATE_CAMERA_ACTIVITY:
+
+        def handle_upsert_review_segment():
+            ReviewSegment.insert(payload).on_conflict(
+                conflict_target=[ReviewSegment.id],
+                update=payload,
+            ).execute()
+
+        def handle_clear_ongoing_review_segments():
+            ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
+                ReviewSegment.end_time.is_null(True)
+            ).execute()
+
+        def handle_update_camera_activity():
             self.camera_activity = payload
-        elif topic == UPDATE_EVENT_DESCRIPTION:
+
+        def handle_update_event_description():
             event: Event = Event.get(Event.id == payload["id"])
             event.data["description"] = payload["description"]
             event.save()
             self.publish(
-                "event_update",
-                json.dumps({"id": event.id, "description": event.data["description"]}),
+                "tracked_object_update",
+                json.dumps(
+                    {
+                        "type": TrackedObjectUpdateTypesEnum.description,
+                        "id": event.id,
+                        "description": event.data["description"],
+                    }
+                ),
             )
-        elif topic == UPDATE_MODEL_STATE:
+
+        def handle_update_model_state():
+            if payload:
                 model = payload["model"]
                 state = payload["state"]
                 self.model_state[model] = ModelStatusTypesEnum[state]
                 self.publish("model_state", json.dumps(self.model_state))
-        elif topic == "modelState":
-            model_state = self.model_state.copy()
-            self.publish("model_state", json.dumps(model_state))
-        elif topic == "onConnect":
+
+        def handle_model_state():
+            self.publish("model_state", json.dumps(self.model_state.copy()))
+
+        def handle_update_embeddings_reindex_progress():
+            self.embeddings_reindex = payload
+            self.publish(
+                "embeddings_reindex_progress",
+                json.dumps(payload),
+            )
+
+        def handle_embeddings_reindex_progress():
+            self.publish(
+                "embeddings_reindex_progress",
+                json.dumps(self.embeddings_reindex.copy()),
+            )
+
+        def handle_on_connect():
             camera_status = self.camera_activity.copy()
 
             for camera in camera_status.keys():

@@ -170,6 +185,51 @@ class Dispatcher:
                 }
 
             self.publish("camera_activity", json.dumps(camera_status))
+            self.publish("model_state", json.dumps(self.model_state.copy()))
+            self.publish(
+                "embeddings_reindex_progress",
+                json.dumps(self.embeddings_reindex.copy()),
+            )
+
+        # Dictionary mapping topic to handlers
+        topic_handlers = {
+            INSERT_MANY_RECORDINGS: handle_insert_many_recordings,
+            REQUEST_REGION_GRID: handle_request_region_grid,
+            INSERT_PREVIEW: handle_insert_preview,
+            UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment,
+            CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
+            UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
+            UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
+            UPDATE_MODEL_STATE: handle_update_model_state,
+            UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
+            "restart": handle_restart,
+            "embeddingsReindexProgress": handle_embeddings_reindex_progress,
+            "modelState": handle_model_state,
+            "onConnect": handle_on_connect,
+        }
+
+        if topic.endswith("set") or topic.endswith("ptz"):
+            try:
+                parts = topic.split("/")
+                if len(parts) == 3 and topic.endswith("set"):
+                    # example /cam_name/detect/set payload=ON|OFF
+                    camera_name = parts[-3]
+                    command = parts[-2]
+                    handle_camera_command("set", camera_name, command, payload)
+                elif len(parts) == 2 and topic.endswith("set"):
+                    command = parts[-2]
+                    self._global_settings_handlers[command](payload)
+                elif len(parts) == 2 and topic.endswith("ptz"):
+                    # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
+                    camera_name = parts[-2]
+                    handle_camera_command("ptz", camera_name, "", payload)
+            except IndexError:
+                logger.error(
+                    f"Received invalid {topic.split('/')[-1]} command: {topic}"
+                )
+                return
+        elif topic in topic_handlers:
+            return topic_handlers[topic]()
         else:
             self.publish(topic, payload, retain=False)
 
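The refactor above replaces a long if/elif chain in `_receive` with nested handler closures selected from a dict. A standalone sketch of the same dispatch-table pattern, with illustrative names:

# Standalone illustration of the dispatch-table pattern used in _receive;
# all names below are illustrative, not taken from the diff.
from typing import Any, Callable


def receive(topic: str, payload: Any) -> Any:
    def handle_restart() -> str:
        return "restarting"

    def handle_ping() -> str:
        # Closures capture `payload`, so handlers need no arguments.
        return f"pong: {payload}"

    topic_handlers: dict[str, Callable[[], Any]] = {
        "restart": handle_restart,
        "ping": handle_ping,
    }

    if topic in topic_handlers:
        return topic_handlers[topic]()
    return None  # unknown topics fall through


print(receive("ping", "hello"))  # -> pong: hello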
frigate/comms/embeddings_updater.py (new file, 65 lines)
@@ -0,0 +1,65 @@
+"""Facilitates communication between processes."""
+
+from enum import Enum
+from typing import Callable
+
+import zmq
+
+SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"
+
+
+class EmbeddingsRequestEnum(Enum):
+    embed_description = "embed_description"
+    embed_thumbnail = "embed_thumbnail"
+    generate_search = "generate_search"
+
+
+class EmbeddingsResponder:
+    def __init__(self) -> None:
+        self.context = zmq.Context()
+        self.socket = self.context.socket(zmq.REP)
+        self.socket.bind(SOCKET_REP_REQ)
+
+    def check_for_request(self, process: Callable) -> None:
+        while True:  # load all messages that are queued
+            has_message, _, _ = zmq.select([self.socket], [], [], 0.1)
+
+            if not has_message:
+                break
+
+            try:
+                (topic, value) = self.socket.recv_json(flags=zmq.NOBLOCK)
+
+                response = process(topic, value)
+
+                if response is not None:
+                    self.socket.send_json(response)
+                else:
+                    self.socket.send_json([])
+            except zmq.ZMQError:
+                break
+
+    def stop(self) -> None:
+        self.socket.close()
+        self.context.destroy()
+
+
+class EmbeddingsRequestor:
+    """Simplifies sending data to EmbeddingsResponder and getting a reply."""
+
+    def __init__(self) -> None:
+        self.context = zmq.Context()
+        self.socket = self.context.socket(zmq.REQ)
+        self.socket.connect(SOCKET_REP_REQ)
+
+    def send_data(self, topic: str, data: any) -> str:
+        """Sends data and then waits for reply."""
+        try:
+            self.socket.send_json((topic, data))
+            return self.socket.recv_json()
+        except zmq.ZMQError:
+            return ""
+
+    def stop(self) -> None:
+        self.socket.close()
+        self.context.destroy()
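A hedged demo of the new REQ/REP pair above. Frigate runs the two ends in separate processes; the handler body and the use of a thread here are illustrative only, and the ipc socket path requires /tmp/cache to exist:

# Self-contained sketch; in Frigate the responder lives in the embeddings process.
import os
import threading

from frigate.comms.embeddings_updater import (
    EmbeddingsRequestEnum,
    EmbeddingsRequestor,
    EmbeddingsResponder,
)

os.makedirs("/tmp/cache", exist_ok=True)
responder = EmbeddingsResponder()


def process(topic: str, value: dict) -> list[float]:
    # Stand-in handler: a real one would compute an embedding for `value`.
    if topic == EmbeddingsRequestEnum.embed_thumbnail.value:
        return [0.1, 0.2, 0.3]
    return []


def serve() -> None:
    for _ in range(50):  # poll for a while, then exit
        responder.check_for_request(process)


threading.Thread(target=serve, daemon=True).start()

requestor = EmbeddingsRequestor()
print(
    requestor.send_data(
        EmbeddingsRequestEnum.embed_thumbnail.value,
        {"id": "abc123", "thumbnail": "base64-bytes-here"},
    )
)  # -> [0.1, 0.2, 0.3]
requestor.stop()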
@@ -39,7 +39,7 @@ class EventMetadataSubscriber(Subscriber):
         super().__init__(topic)
 
     def check_for_update(
-        self, timeout: float = None
+        self, timeout: float = 1
     ) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
         return super().check_for_update(timeout)
 
@@ -14,7 +14,7 @@ class EventUpdatePublisher(Publisher):
         super().__init__("update")
 
     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, any]]
+        self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, any]]
     ) -> None:
         super().publish(payload)
 
@@ -65,8 +65,11 @@ class InterProcessRequestor:
 
     def send_data(self, topic: str, data: any) -> any:
         """Sends data and then waits for reply."""
-        self.socket.send_json((topic, data))
-        return self.socket.recv_json()
+        try:
+            self.socket.send_json((topic, data))
+            return self.socket.recv_json()
+        except zmq.ZMQError:
+            return ""
 
     def stop(self) -> None:
         self.socket.close()
@@ -17,7 +17,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
     def __init__(self, config: FrigateConfig) -> None:
         self.config = config
         self.mqtt_config = config.mqtt
-        self.connected: bool = False
+        self.connected = False
 
     def subscribe(self, receiver: Callable) -> None:
         """Wrapper for allowing dispatcher to subscribe."""

@@ -27,7 +27,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
     def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """Wrapper for publishing when client is in valid state."""
         if not self.connected:
-            logger.error(f"Unable to publish to {topic}: client is not connected")
+            logger.debug(f"Unable to publish to {topic}: client is not connected")
             return
 
         self.client.publish(

@@ -133,7 +133,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         """Mqtt connection callback."""
         threading.current_thread().name = "mqtt"
         if reason_code != 0:
-            if reason_code == "Server Unavailable":
+            if reason_code == "Server unavailable":
                 logger.error(
                     "Unable to connect to MQTT server: MQTT Server unavailable"
                 )

@@ -173,6 +173,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             client_id=self.mqtt_config.client_id,
         )
         self.client.on_connect = self._on_connect
+        self.client.on_disconnect = self._on_disconnect
         self.client.will_set(
             self.mqtt_config.topic_prefix + "/available",
             payload="offline",

@@ -197,14 +198,6 @@ class MqttClient(Communicator):  # type: ignore[misc]
 
         for name in self.config.cameras.keys():
             for callback in callback_types:
-                # We need to pre-clear existing set topics because in previous
-                # versions the webUI retained on the /set topic but this is
-                # no longer the case.
-                self.client.publish(
-                    f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
-                    None,
-                    retain=True,
-                )
                 self.client.message_callback_add(
                     f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
                     self.on_mqtt_command,
@@ -13,7 +13,7 @@ class AuthConfig(FrigateBaseModel):
         default=False, title="Reset the admin password on startup"
     )
     cookie_name: str = Field(
-        default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z]_*$"
+        default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
     )
     cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
     session_length: int = Field(
@@ -23,7 +23,7 @@ class GenAICameraConfig(BaseModel):
         default=False, title="Use snapshots for generating descriptions."
     )
     prompt: str = Field(
-        default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
+        default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
         title="Default caption prompt.",
     )
     object_prompts: dict[str, str] = Field(

@@ -51,7 +51,7 @@ class GenAICameraConfig(BaseModel):
 class GenAIConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable GenAI.")
     prompt: str = Field(
-        default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
+        default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
         title="Default caption prompt.",
     )
     object_prompts: dict[str, str] = Field(
@@ -94,3 +94,10 @@ class RecordConfig(FrigateBaseModel):
     enabled_in_config: Optional[bool] = Field(
         default=None, title="Keep track of original state of recording."
     )
+
+    @property
+    def event_pre_capture(self) -> int:
+        return max(
+            self.alerts.pre_capture,
+            self.detections.pre_capture,
+        )
@@ -67,7 +67,7 @@ logger = logging.getLogger(__name__)
 
 yaml = YAML()
 
-DEFAULT_CONFIG_FILES = ["/config/config.yaml", "/config/config.yml"]
+DEFAULT_CONFIG_FILE = "/config/config.yml"
 DEFAULT_CONFIG = """
 mqtt:
   enabled: False

@@ -634,27 +634,23 @@ class FrigateConfig(FrigateBaseModel):
 
     @classmethod
     def load(cls, **kwargs):
-        config_path = os.environ.get("CONFIG_FILE")
+        config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
 
-        # No explicit configuration file, try to find one in the default paths.
-        if config_path is None:
-            for path in DEFAULT_CONFIG_FILES:
-                if os.path.isfile(path):
-                    config_path = path
-                    break
+        if not os.path.isfile(config_path):
+            config_path = config_path.replace("yml", "yaml")
 
         # No configuration file found, create one.
         new_config = False
-        if config_path is None:
+        if not os.path.isfile(config_path):
             logger.info("No config file found, saving default config")
-            config_path = DEFAULT_CONFIG_FILES[-1]
+            config_path = DEFAULT_CONFIG_FILE
             new_config = True
         else:
             # Check if the config file needs to be migrated.
             migrate_frigate_config(config_path)
 
         # Finally, load the resulting configuration file.
-        with open(config_path, "a+") as f:
+        with open(config_path, "a+" if new_config else "r") as f:
             # Only write the default config if the opened file is non-empty. This can happen as
             # a race condition. It's extremely unlikely, but eh. Might as well check it.
             if new_config and f.tell() == 0:
@@ -23,7 +23,7 @@ EnvString = Annotated[str, AfterValidator(validate_env_string)]
 
 def validate_env_vars(v: dict[str, str], info: ValidationInfo) -> dict[str, str]:
     if isinstance(info.context, dict) and info.context.get("install", False):
-        for k, v in v:
+        for k, v in v.items():
             os.environ[k] = v
 
     return v
@@ -12,3 +12,6 @@ class SemanticSearchConfig(FrigateBaseModel):
     reindex: Optional[bool] = Field(
         default=False, title="Reindex all detections on startup."
     )
+    model_size: str = Field(
+        default="small", title="The size of the embeddings model used."
+    )
@@ -17,7 +17,21 @@ PLUS_API_HOST = "https://api.frigate.video"
 
 DEFAULT_ATTRIBUTE_LABEL_MAP = {
     "person": ["amazon", "face"],
-    "car": ["amazon", "fedex", "license_plate", "ups"],
+    "car": [
+        "amazon",
+        "an_post",
+        "dhl",
+        "dpd",
+        "fedex",
+        "gls",
+        "license_plate",
+        "nzpost",
+        "postnl",
+        "postnord",
+        "purolator",
+        "ups",
+        "usps",
+    ],
 }
 LABEL_CONSOLIDATION_MAP = {
     "car": 0.8,

@@ -85,6 +99,7 @@ CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
 UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
 UPDATE_EVENT_DESCRIPTION = "update_event_description"
 UPDATE_MODEL_STATE = "update_model_state"
+UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
 
 # Stats Values
 
@@ -20,3 +20,34 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
         conn.enable_load_extension(True)
         conn.load_extension(self.sqlite_vec_path)
         conn.enable_load_extension(False)
+
+    def delete_embeddings_thumbnail(self, event_ids: list[str]) -> None:
+        ids = ",".join(["?" for _ in event_ids])
+        self.execute_sql(f"DELETE FROM vec_thumbnails WHERE id IN ({ids})", event_ids)
+
+    def delete_embeddings_description(self, event_ids: list[str]) -> None:
+        ids = ",".join(["?" for _ in event_ids])
+        self.execute_sql(f"DELETE FROM vec_descriptions WHERE id IN ({ids})", event_ids)
+
+    def drop_embeddings_tables(self) -> None:
+        self.execute_sql("""
+            DROP TABLE vec_descriptions;
+        """)
+        self.execute_sql("""
+            DROP TABLE vec_thumbnails;
+        """)
+
+    def create_embeddings_tables(self) -> None:
+        """Create vec0 virtual table for embeddings"""
+        self.execute_sql("""
+            CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
+                id TEXT PRIMARY KEY,
+                thumbnail_embedding FLOAT[768] distance_metric=cosine
+            );
+        """)
+        self.execute_sql("""
+            CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
+                id TEXT PRIMARY KEY,
+                description_embedding FLOAT[768] distance_metric=cosine
+            );
+        """)
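The vec0 virtual tables above are queried with MATCH plus a `k` constraint. A self-contained sketch against an in-memory database, assuming the sqlite-vec Python package is available, with the embedding shrunk to four floats to keep the demo short (Frigate uses FLOAT[768]):

# Standalone sqlite-vec demo mirroring the schema above.
import sqlite3
import struct

import sqlite_vec  # pip install sqlite-vec (assumed available)

db = sqlite3.connect(":memory:")
db.enable_load_extension(True)
sqlite_vec.load(db)
db.enable_load_extension(False)

db.execute(
    "CREATE VIRTUAL TABLE vec_thumbnails USING vec0("
    "id TEXT PRIMARY KEY, thumbnail_embedding FLOAT[4] distance_metric=cosine)"
)


def serialize(vector: list[float]) -> bytes:
    """Pack floats into the raw-bytes format sqlite-vec expects."""
    return struct.pack("%sf" % len(vector), *vector)


db.execute(
    "INSERT INTO vec_thumbnails(id, thumbnail_embedding) VALUES (?, ?)",
    ["event1", serialize([0.1, 0.2, 0.3, 0.4])],
)

# KNN query: MATCH against the embedding column; k bounds the result count.
rows = db.execute(
    "SELECT id, distance FROM vec_thumbnails "
    "WHERE thumbnail_embedding MATCH ? AND k = 10 ORDER BY distance",
    [serialize([0.1, 0.2, 0.3, 0.4])],
).fetchall()
print(rows)  # -> [('event1', 0.0)] for an identical vector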
@@ -27,6 +27,11 @@ class InputTensorEnum(str, Enum):
     nhwc = "nhwc"
 
 
+class InputDTypeEnum(str, Enum):
+    float = "float"
+    int = "int"
+
+
 class ModelTypeEnum(str, Enum):
     ssd = "ssd"
     yolox = "yolox"

@@ -53,12 +58,16 @@ class ModelConfig(BaseModel):
     input_pixel_format: PixelFormatEnum = Field(
         default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
     )
+    input_dtype: InputDTypeEnum = Field(
+        default=InputDTypeEnum.int, title="Model Input D Type"
+    )
     model_type: ModelTypeEnum = Field(
         default=ModelTypeEnum.ssd, title="Object Detection Model Type"
     )
     _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
     _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
     _all_attributes: list[str] = PrivateAttr()
+    _all_attribute_logos: list[str] = PrivateAttr()
     _model_hash: str = PrivateAttr()
 
     @property

@@ -73,6 +82,10 @@ class ModelConfig(BaseModel):
     def all_attributes(self) -> list[str]:
         return self._all_attributes
 
+    @property
+    def all_attribute_logos(self) -> list[str]:
+        return self._all_attribute_logos
+
     @property
     def model_hash(self) -> str:
         return self._model_hash

@@ -93,6 +106,9 @@ class ModelConfig(BaseModel):
         unique_attributes.update(attributes)
 
         self._all_attributes = list(unique_attributes)
+        self._all_attribute_logos = list(
+            unique_attributes - set(["face", "license_plate"])
+        )
 
     def check_and_load_plus_model(
         self, plus_api: PlusApi, detector: str = None

@@ -140,6 +156,9 @@ class ModelConfig(BaseModel):
         unique_attributes.update(attributes)
 
         self._all_attributes = list(unique_attributes)
+        self._all_attribute_logos = list(
+            unique_attributes - set(["face", "license_plate"])
+        )
 
         self._merged_labelmap = {
             **{int(key): val for key, val in model_info["labelMap"].items()},

@@ -157,10 +176,14 @@ class ModelConfig(BaseModel):
         self._model_hash = file_hash.hexdigest()
 
     def create_colormap(self, enabled_labels: set[str]) -> None:
-        """Get a list of colors for enabled labels."""
-        colors = generate_color_palette(len(enabled_labels))
-        self._colormap = {label: color for label, color in zip(enabled_labels, colors)}
+        """Get a list of colors for enabled labels that aren't attributes."""
+        enabled_trackable_labels = list(
+            filter(lambda label: label not in self._all_attributes, enabled_labels)
+        )
+        colors = generate_color_palette(len(enabled_trackable_labels))
+        self._colormap = {
+            label: color for label, color in zip(enabled_trackable_labels, colors)
+        }
 
     model_config = ConfigDict(extra="forbid", protected_namespaces=())
 
@@ -54,7 +54,7 @@ class ONNXDetector(DetectionApi):
 
         logger.info(f"ONNX: {path} loaded")
 
-    def detect_raw(self, tensor_input):
+    def detect_raw(self, tensor_input: np.ndarray):
         model_input_name = self.model.get_inputs()[0].name
         tensor_output = self.model.run(None, {model_input_name: tensor_input})
 
@@ -3,6 +3,7 @@ import os
 
 import numpy as np
 import openvino as ov
+import openvino.properties as props
 from pydantic import Field
 from typing_extensions import Literal
 

@@ -34,6 +35,8 @@ class OvDetector(DetectionApi):
             logger.error(f"OpenVino model file {detector_config.model.path} not found.")
             raise FileNotFoundError
 
+        os.makedirs("/config/model_cache/openvino", exist_ok=True)
+        self.ov_core.set_property({props.cache_dir: "/config/model_cache/openvino"})
         self.interpreter = self.ov_core.compile_model(
             model=detector_config.model.path, device_name=detector_config.device
         )
@@ -98,9 +98,7 @@ class ROCmDetector(DetectionApi):
         else:
             logger.info(f"AMD/ROCm: loading model from {path}")
 
-        if path.endswith(".onnx"):
-            self.model = migraphx.parse_onnx(path)
-        elif (
+        if (
             path.endswith(".tf")
             or path.endswith(".tf2")
             or path.endswith(".tflite")

@@ -108,7 +106,7 @@ class ROCmDetector(DetectionApi):
             # untested
             self.model = migraphx.parse_tf(path)
         else:
-            raise Exception(f"AMD/ROCm: unknown model format {path}")
+            self.model = migraphx.parse_onnx(path)
 
         logger.info("AMD/ROCm: compiling the model")
 
@@ -7,17 +7,18 @@ import os
 import signal
 import threading
 from types import FrameType
-from typing import Optional
+from typing import Optional, Union
 
 from setproctitle import setproctitle
 
+from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
 from frigate.config import FrigateConfig
 from frigate.const import CONFIG_DIR
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
+from frigate.util.builtin import serialize
 from frigate.util.services import listen
 
-from .embeddings import Embeddings
 from .maintainer import EmbeddingMaintainer
 from .util import ZScoreNormalization
 

@@ -55,12 +56,6 @@ def manage_embeddings(config: FrigateConfig) -> None:
     models = [Event]
     db.bind(models)
 
-    embeddings = Embeddings(db)
-
-    # Check if we need to re-index events
-    if config.semantic_search.reindex:
-        embeddings.reindex()
-
     maintainer = EmbeddingMaintainer(
         db,
         config,

@@ -71,9 +66,10 @@ def manage_embeddings(config: FrigateConfig) -> None:
 
 class EmbeddingsContext:
     def __init__(self, db: SqliteVecQueueDatabase):
-        self.embeddings = Embeddings(db)
+        self.db = db
         self.thumb_stats = ZScoreNormalization()
-        self.desc_stats = ZScoreNormalization(scale_factor=3, bias=-2.5)
+        self.desc_stats = ZScoreNormalization()
+        self.requestor = EmbeddingsRequestor()
 
         # load stats from disk
         try:

@@ -84,7 +80,7 @@ class EmbeddingsContext:
         except FileNotFoundError:
             pass
 
-    def save_stats(self):
+    def stop(self):
         """Write the stats to disk as JSON on exit."""
         contents = {
             "thumb_stats": self.thumb_stats.to_dict(),

@@ -92,3 +88,109 @@ class EmbeddingsContext:
         }
         with open(os.path.join(CONFIG_DIR, ".search_stats.json"), "w") as f:
             json.dump(contents, f)
+        self.requestor.stop()
+
+    def search_thumbnail(
+        self, query: Union[Event, str], event_ids: list[str] = None
+    ) -> list[tuple[str, float]]:
+        if query.__class__ == Event:
+            cursor = self.db.execute_sql(
+                """
+                SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
+                """,
+                [query.id],
+            )
+
+            row = cursor.fetchone() if cursor else None
+
+            if row:
+                query_embedding = row[0]
+            else:
+                # If no embedding found, generate it and return it
+                data = self.requestor.send_data(
+                    EmbeddingsRequestEnum.embed_thumbnail.value,
+                    {"id": str(query.id), "thumbnail": str(query.thumbnail)},
+                )
+
+                if not data:
+                    return []
+
+                query_embedding = serialize(data)
+        else:
+            data = self.requestor.send_data(
+                EmbeddingsRequestEnum.generate_search.value, query
+            )
+
+            if not data:
+                return []
+
+            query_embedding = serialize(data)
+
+        sql_query = """
+            SELECT
+                id,
+                distance
+            FROM vec_thumbnails
+            WHERE thumbnail_embedding MATCH ?
+                AND k = 100
+        """
+
+        # Add the IN clause if event_ids is provided and not empty
+        # this is the only filter supported by sqlite-vec as of 0.1.3
+        # but it seems to be broken in this version
+        if event_ids:
+            sql_query += " AND id IN ({})".format(",".join("?" * len(event_ids)))
+
+        # order by distance DESC is not implemented in this version of sqlite-vec
+        # when it's implemented, we can use cosine similarity
+        sql_query += " ORDER BY distance"
+
+        parameters = [query_embedding] + event_ids if event_ids else [query_embedding]
+
+        results = self.db.execute_sql(sql_query, parameters).fetchall()
+
+        return results
+
+    def search_description(
+        self, query_text: str, event_ids: list[str] = None
+    ) -> list[tuple[str, float]]:
+        data = self.requestor.send_data(
+            EmbeddingsRequestEnum.generate_search.value, query_text
+        )
+
+        if not data:
+            return []
+
+        query_embedding = serialize(data)
+
+        # Prepare the base SQL query
+        sql_query = """
+            SELECT
+                id,
+                distance
+            FROM vec_descriptions
+            WHERE description_embedding MATCH ?
+                AND k = 100
+        """
+
+        # Add the IN clause if event_ids is provided and not empty
+        # this is the only filter supported by sqlite-vec as of 0.1.3
+        # but it seems to be broken in this version
+        if event_ids:
+            sql_query += " AND id IN ({})".format(",".join("?" * len(event_ids)))
+
+        # order by distance DESC is not implemented in this version of sqlite-vec
+        # when it's implemented, we can use cosine similarity
+        sql_query += " ORDER BY distance"
+
+        parameters = [query_embedding] + event_ids if event_ids else [query_embedding]
+
+        results = self.db.execute_sql(sql_query, parameters).fetchall()
+
+        return results
+
+    def update_description(self, event_id: str, description: str) -> None:
+        self.requestor.send_data(
+            EmbeddingsRequestEnum.embed_description.value,
+            {"id": event_id, "description": description},
+        )
|
|||||||
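For reference, the KNN query these methods build is plain sqlite-vec SQL: a MATCH against the vec0 virtual table, a `k` constraint, and an optional `IN` filter on ids. A minimal standalone sketch, assuming the sqlite-vec Python bindings (`sqlite_vec.load`) and a hypothetical 4-dimensional table (Frigate's real tables are much wider):

    import sqlite3
    import struct

    import sqlite_vec  # assumed: Python package for the sqlite-vec loadable extension

    db = sqlite3.connect(":memory:")
    db.enable_load_extension(True)
    sqlite_vec.load(db)

    # hypothetical tiny table; vec0 stores embeddings as raw float32 blobs
    db.execute(
        "CREATE VIRTUAL TABLE vec_demo USING vec0(id TEXT PRIMARY KEY, emb FLOAT[4])"
    )
    db.execute(
        "INSERT INTO vec_demo(id, emb) VALUES (?, ?)",
        ["event1", struct.pack("4f", 0.1, 0.2, 0.3, 0.4)],
    )

    # KNN: MATCH takes the serialized query vector, k bounds the result count
    query = struct.pack("4f", 0.1, 0.2, 0.3, 0.4)
    rows = db.execute(
        "SELECT id, distance FROM vec_demo WHERE emb MATCH ? AND k = 3 ORDER BY distance",
        [query],
    ).fetchall()
    print(rows)  # [('event1', 0.0)]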
@@ -1,23 +1,26 @@
 """SQLite-vec embeddings database."""
 
 import base64
-import io
 import logging
-import struct
+import os
 import time
-from typing import List, Tuple, Union
 
-from PIL import Image
+from numpy import ndarray
 from playhouse.shortcuts import model_to_dict
 
 from frigate.comms.inter_process import InterProcessRequestor
-from frigate.const import UPDATE_MODEL_STATE
+from frigate.config.semantic_search import SemanticSearchConfig
+from frigate.const import (
+    CONFIG_DIR,
+    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
+    UPDATE_MODEL_STATE,
+)
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
 from frigate.types import ModelStatusTypesEnum
+from frigate.util.builtin import serialize
 
-from .functions.clip import ClipEmbedding
-from .functions.minilm_l6_v2 import MiniLMEmbedding
+from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
 
 logger = logging.getLogger(__name__)
 
@@ -53,31 +56,26 @@ def get_metadata(event: Event) -> dict:
     )
 
 
-def serialize(vector: List[float]) -> bytes:
-    """Serializes a list of floats into a compact "raw bytes" format"""
-    return struct.pack("%sf" % len(vector), *vector)
-
-
-def deserialize(bytes_data: bytes) -> List[float]:
-    """Deserializes a compact "raw bytes" format into a list of floats"""
-    return list(struct.unpack("%sf" % (len(bytes_data) // 4), bytes_data))
-
-
 class Embeddings:
     """SQLite-vec embeddings database."""
 
-    def __init__(self, db: SqliteVecQueueDatabase) -> None:
+    def __init__(
+        self, config: SemanticSearchConfig, db: SqliteVecQueueDatabase
+    ) -> None:
+        self.config = config
         self.db = db
         self.requestor = InterProcessRequestor()
 
         # Create tables if they don't exist
-        self._create_tables()
+        self.db.create_embeddings_tables()
 
         models = [
-            "sentence-transformers/all-MiniLM-L6-v2-model.onnx",
-            "sentence-transformers/all-MiniLM-L6-v2-tokenizer",
-            "clip-clip_image_model_vitb32.onnx",
-            "clip-clip_text_model_vitb32.onnx",
+            "jinaai/jina-clip-v1-text_model_fp16.onnx",
+            "jinaai/jina-clip-v1-tokenizer",
+            "jinaai/jina-clip-v1-vision_model_fp16.onnx"
+            if config.model_size == "large"
+            else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
+            "jinaai/jina-clip-v1-preprocessor_config.json",
        ]
 
         for model in models:
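The deleted `serialize`/`deserialize` helpers packed embeddings as raw float32 bytes, which is the on-disk format the vec0 tables expect; the replacement in `frigate.util.builtin` adds more options (later in this diff it is called with `pack=False`), but the core round trip is just `struct`:

    import struct
    from typing import List

    def serialize(vector: List[float]) -> bytes:
        """Pack a list of floats into raw float32 bytes (4 bytes per value)."""
        return struct.pack("%sf" % len(vector), *vector)

    def deserialize(bytes_data: bytes) -> List[float]:
        """Unpack raw float32 bytes back into a list of floats."""
        return list(struct.unpack("%sf" % (len(bytes_data) // 4), bytes_data))

    vec = [0.5, -1.25, 2.0]  # exactly representable in float32
    assert deserialize(serialize(vec)) == vec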
@@ -89,36 +87,53 @@ class Embeddings:
                 },
             )
 
-        self.clip_embedding = ClipEmbedding(
-            preferred_providers=["CPUExecutionProvider"]
-        )
-        self.minilm_embedding = MiniLMEmbedding(
-            preferred_providers=["CPUExecutionProvider"],
+        self.text_embedding = GenericONNXEmbedding(
+            model_name="jinaai/jina-clip-v1",
+            model_file="text_model_fp16.onnx",
+            tokenizer_file="tokenizer",
+            download_urls={
+                "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
+            },
+            model_size=config.model_size,
+            model_type=ModelTypeEnum.text,
+            requestor=self.requestor,
+            device="CPU",
         )
 
-    def _create_tables(self):
-        # Create vec0 virtual table for thumbnail embeddings
-        self.db.execute_sql("""
-            CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
-                id TEXT PRIMARY KEY,
-                thumbnail_embedding FLOAT[512]
-            );
-        """)
+        model_file = (
+            "vision_model_fp16.onnx"
+            if self.config.model_size == "large"
+            else "vision_model_quantized.onnx"
+        )
 
-        # Create vec0 virtual table for description embeddings
-        self.db.execute_sql("""
-            CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
-                id TEXT PRIMARY KEY,
-                description_embedding FLOAT[384]
-            );
-        """)
+        download_urls = {
+            model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
+            "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
+        }
 
-    def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
+        self.vision_embedding = GenericONNXEmbedding(
+            model_name="jinaai/jina-clip-v1",
+            model_file=model_file,
+            download_urls=download_urls,
+            model_size=config.model_size,
+            model_type=ModelTypeEnum.vision,
+            requestor=self.requestor,
+            device="GPU" if config.model_size == "large" else "CPU",
+        )
+
+    def embed_thumbnail(
+        self, event_id: str, thumbnail: bytes, upsert: bool = True
+    ) -> ndarray:
+        """Embed thumbnail and optionally insert into DB.
+
+        @param: event_id in Events DB
+        @param: thumbnail bytes in jpg format
+        @param: upsert If embedding should be upserted into vec DB
+        """
         # Convert thumbnail bytes to PIL Image
-        image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
-        # Generate embedding using CLIP
-        embedding = self.clip_embedding([image])[0]
+        embedding = self.vision_embedding([thumbnail])[0]
 
+        if upsert:
             self.db.execute_sql(
                 """
                 INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
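The constructor above encodes one policy twice: `model_size == "large"` selects the fp16 vision model and targets the GPU, anything else selects the quantized model on CPU. Condensed into one hypothetical helper purely for clarity:

    def pick_vision_model(model_size: str) -> tuple[str, str]:
        """Map the configured model size to (onnx file, device), per the diff above."""
        if model_size == "large":
            return "vision_model_fp16.onnx", "GPU"
        return "vision_model_quantized.onnx", "CPU"

    assert pick_vision_model("large") == ("vision_model_fp16.onnx", "GPU")
    assert pick_vision_model("small") == ("vision_model_quantized.onnx", "CPU")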
@@ -129,10 +144,40 @@ class Embeddings:
 
         return embedding
 
-    def upsert_description(self, event_id: str, description: str):
-        # Generate embedding using MiniLM
-        embedding = self.minilm_embedding([description])[0]
+    def batch_embed_thumbnail(
+        self, event_thumbs: dict[str, bytes], upsert: bool = True
+    ) -> list[ndarray]:
+        """Embed thumbnails and optionally insert into DB.
+
+        @param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
+        @param: upsert If embedding should be upserted into vec DB
+        """
+        ids = list(event_thumbs.keys())
+        embeddings = self.vision_embedding(list(event_thumbs.values()))
+
+        if upsert:
+            items = []
+
+            for i in range(len(ids)):
+                items.append(ids[i])
+                items.append(serialize(embeddings[i]))
+
+            self.db.execute_sql(
+                """
+                INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
+                VALUES {}
+                """.format(", ".join(["(?, ?)"] * len(ids))),
+                items,
+            )
+
+        return embeddings
+
+    def embed_description(
+        self, event_id: str, description: str, upsert: bool = True
+    ) -> ndarray:
+        embedding = self.text_embedding([description])[0]
+
+        if upsert:
             self.db.execute_sql(
                 """
                 INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
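batch_embed_thumbnail builds a single multi-row INSERT OR REPLACE by repeating one `(?, ?)` placeholder pair per event and flattening `[id, blob, id, blob, ...]` into the parameter list. The same pattern against plain `sqlite3` with a hypothetical table:

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE vec_demo (id TEXT PRIMARY KEY, emb BLOB)")

    rows = {"event1": b"\x00\x00\x80?", "event2": b"\x00\x00\x00@"}

    items = []
    for event_id, blob in rows.items():
        items.append(event_id)  # parameters flatten pairwise: id, blob, id, blob...
        items.append(blob)

    sql = "INSERT OR REPLACE INTO vec_demo(id, emb) VALUES {}".format(
        ", ".join(["(?, ?)"] * len(rows))
    )
    db.execute(sql, items)
    print(db.execute("SELECT count(*) FROM vec_demo").fetchone())  # (2,)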
@@ -143,117 +188,71 @@ class Embeddings:
 
         return embedding
 
-    def delete_thumbnail(self, event_ids: List[str]) -> None:
-        ids = ",".join(["?" for _ in event_ids])
+    def batch_embed_description(
+        self, event_descriptions: dict[str, str], upsert: bool = True
+    ) -> ndarray:
+        # upsert embeddings one by one to avoid token limit
+        embeddings = []
+
+        for desc in event_descriptions.values():
+            embeddings.append(self.text_embedding([desc])[0])
+
+        if upsert:
+            ids = list(event_descriptions.keys())
+            items = []
+
+            for i in range(len(ids)):
+                items.append(ids[i])
+                items.append(serialize(embeddings[i]))
+
             self.db.execute_sql(
-            f"DELETE FROM vec_thumbnails WHERE id IN ({ids})", event_ids
-        )
-
-    def delete_description(self, event_ids: List[str]) -> None:
-        ids = ",".join(["?" for _ in event_ids])
-        self.db.execute_sql(
-            f"DELETE FROM vec_descriptions WHERE id IN ({ids})", event_ids
-        )
-
-    def search_thumbnail(
-        self, query: Union[Event, str], event_ids: List[str] = None
-    ) -> List[Tuple[str, float]]:
-        if query.__class__ == Event:
-            cursor = self.db.execute_sql(
                 """
-                SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
-                """,
-                [query.id],
+                INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
+                VALUES {}
+                """.format(", ".join(["(?, ?)"] * len(ids))),
+                items,
             )
 
-            row = cursor.fetchone() if cursor else None
+        return embeddings
 
-            if row:
-                query_embedding = deserialize(
-                    row[0]
-                )  # Deserialize the thumbnail embedding
-            else:
-                # If no embedding found, generate it and return it
-                thumbnail = base64.b64decode(query.thumbnail)
-                query_embedding = self.upsert_thumbnail(query.id, thumbnail)
-        else:
-            query_embedding = self.clip_embedding([query])[0]
-
-        sql_query = """
-            SELECT
-                id,
-                distance
-            FROM vec_thumbnails
-            WHERE thumbnail_embedding MATCH ?
-                AND k = 100
-        """
-
-        # Add the IN clause if event_ids is provided and not empty
-        # this is the only filter supported by sqlite-vec as of 0.1.3
-        # but it seems to be broken in this version
-        if event_ids:
-            sql_query += " AND id IN ({})".format(",".join("?" * len(event_ids)))
-
-        # order by distance DESC is not implemented in this version of sqlite-vec
-        # when it's implemented, we can use cosine similarity
-        sql_query += " ORDER BY distance"
-
-        parameters = (
-            [serialize(query_embedding)] + event_ids
-            if event_ids
-            else [serialize(query_embedding)]
-        )
-
-        results = self.db.execute_sql(sql_query, parameters).fetchall()
-
-        return results
-
-    def search_description(
-        self, query_text: str, event_ids: List[str] = None
-    ) -> List[Tuple[str, float]]:
-        query_embedding = self.minilm_embedding([query_text])[0]
-
-        # Prepare the base SQL query
-        sql_query = """
-            SELECT
-                id,
-                distance
-            FROM vec_descriptions
-            WHERE description_embedding MATCH ?
-                AND k = 100
-        """
-
-        # Add the IN clause if event_ids is provided and not empty
-        # this is the only filter supported by sqlite-vec as of 0.1.3
-        # but it seems to be broken in this version
-        if event_ids:
-            sql_query += " AND id IN ({})".format(",".join("?" * len(event_ids)))
-
-        # order by distance DESC is not implemented in this version of sqlite-vec
-        # when it's implemented, we can use cosine similarity
-        sql_query += " ORDER BY distance"
-
-        parameters = (
-            [serialize(query_embedding)] + event_ids
-            if event_ids
-            else [serialize(query_embedding)]
-        )
-
-        results = self.db.execute_sql(sql_query, parameters).fetchall()
-
-        return results
-
     def reindex(self) -> None:
-        logger.info("Indexing event embeddings...")
+        logger.info("Indexing tracked object embeddings...")
 
+        self.db.drop_embeddings_tables()
+        logger.debug("Dropped embeddings tables.")
+        self.db.create_embeddings_tables()
+        logger.debug("Created embeddings tables.")
+
+        # Delete the saved stats file
+        if os.path.exists(os.path.join(CONFIG_DIR, ".search_stats.json")):
+            os.remove(os.path.join(CONFIG_DIR, ".search_stats.json"))
+
         st = time.time()
+
+        # Get total count of events to process
+        total_events = (
+            Event.select()
+            .where(
+                (Event.has_clip == True | Event.has_snapshot == True)
+                & Event.thumbnail.is_null(False)
+            )
+            .count()
+        )
+
+        batch_size = 32
+        current_page = 1
+
         totals = {
-            "thumb": 0,
-            "desc": 0,
+            "thumbnails": 0,
+            "descriptions": 0,
+            "processed_objects": total_events - 1 if total_events < batch_size else 0,
+            "total_objects": total_events,
+            "time_remaining": 0 if total_events < batch_size else -1,
+            "status": "indexing",
         }
 
-        batch_size = 100
-        current_page = 1
+        self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+
         events = (
             Event.select()
             .where(
@@ -266,14 +265,45 @@ class Embeddings:
 
         while len(events) > 0:
             event: Event
+            batch_thumbs = {}
+            batch_descs = {}
             for event in events:
-                thumbnail = base64.b64decode(event.thumbnail)
-                self.upsert_thumbnail(event.id, thumbnail)
-                totals["thumb"] += 1
-                if description := event.data.get("description", "").strip():
-                    totals["desc"] += 1
-                    self.upsert_description(event.id, description)
+                batch_thumbs[event.id] = base64.b64decode(event.thumbnail)
+                totals["thumbnails"] += 1
+
+                if description := event.data.get("description", "").strip():
+                    batch_descs[event.id] = description
+                    totals["descriptions"] += 1
+
+                totals["processed_objects"] += 1
+
+            # run batch embedding
+            self.batch_embed_thumbnail(batch_thumbs)
+
+            if batch_descs:
+                self.batch_embed_description(batch_descs)
+
+            # report progress every batch so we don't spam the logs
+            progress = (totals["processed_objects"] / total_events) * 100
+            logger.debug(
+                "Processed %d/%d events (%.2f%% complete) | Thumbnails: %d, Descriptions: %d",
+                totals["processed_objects"],
+                total_events,
+                progress,
+                totals["thumbnails"],
+                totals["descriptions"],
+            )
+
+            # Calculate time remaining
+            elapsed_time = time.time() - st
+            avg_time_per_event = elapsed_time / totals["processed_objects"]
+            remaining_events = total_events - totals["processed_objects"]
+            time_remaining = avg_time_per_event * remaining_events
+            totals["time_remaining"] = int(time_remaining)
+
+            self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+
+            # Move to the next page
             current_page += 1
             events = (
                 Event.select()
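The time-remaining figure in the progress payload is a simple running average: elapsed time divided by objects processed so far, scaled by what is left. Isolated (`processed` must be positive, which the loop guarantees by the time this runs):

    import time

    def eta_seconds(start: float, processed: int, total: int) -> int:
        """Estimate remaining seconds from the average time per processed item."""
        elapsed = time.time() - start
        avg_time_per_item = elapsed / processed
        return int(avg_time_per_item * (total - processed))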
@@ -287,7 +317,10 @@ class Embeddings:
 
         logger.info(
             "Embedded %d thumbnails and %d descriptions in %s seconds",
-            totals["thumb"],
-            totals["desc"],
-            time.time() - st,
+            totals["thumbnails"],
+            totals["descriptions"],
+            round(time.time() - st, 1),
         )
+        totals["status"] = "completed"
+
+        self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
frigate/embeddings/functions/clip.py (deleted)
@@ -1,166 +0,0 @@
-import logging
-import os
-from typing import List, Optional, Union
-
-import numpy as np
-import onnxruntime as ort
-from onnx_clip import OnnxClip, Preprocessor, Tokenizer
-from PIL import Image
-
-from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
-from frigate.types import ModelStatusTypesEnum
-from frigate.util.downloader import ModelDownloader
-
-logger = logging.getLogger(__name__)
-
-
-class Clip(OnnxClip):
-    """Override load models to use pre-downloaded models from cache directory."""
-
-    def __init__(
-        self,
-        model: str = "ViT-B/32",
-        batch_size: Optional[int] = None,
-        providers: List[str] = ["CPUExecutionProvider"],
-    ):
-        """
-        Instantiates the model and required encoding classes.
-
-        Args:
-            model: The model to utilize. Currently ViT-B/32 and RN50 are
-                allowed.
-            batch_size: If set, splits the lists in `get_image_embeddings`
-                and `get_text_embeddings` into batches of this size before
-                passing them to the model. The embeddings are then concatenated
-                back together before being returned. This is necessary when
-                passing large amounts of data (perhaps ~100 or more).
-        """
-        allowed_models = ["ViT-B/32", "RN50"]
-        if model not in allowed_models:
-            raise ValueError(f"`model` must be in {allowed_models}. Got {model}.")
-        if model == "ViT-B/32":
-            self.embedding_size = 512
-        elif model == "RN50":
-            self.embedding_size = 1024
-        self.image_model, self.text_model = self._load_models(model, providers)
-        self._tokenizer = Tokenizer()
-        self._preprocessor = Preprocessor()
-        self._batch_size = batch_size
-
-    @staticmethod
-    def _load_models(
-        model: str,
-        providers: List[str],
-    ) -> tuple[ort.InferenceSession, ort.InferenceSession]:
-        """
-        Load models from cache directory.
-        """
-        if model == "ViT-B/32":
-            IMAGE_MODEL_FILE = "clip_image_model_vitb32.onnx"
-            TEXT_MODEL_FILE = "clip_text_model_vitb32.onnx"
-        elif model == "RN50":
-            IMAGE_MODEL_FILE = "clip_image_model_rn50.onnx"
-            TEXT_MODEL_FILE = "clip_text_model_rn50.onnx"
-        else:
-            raise ValueError(f"Unexpected model {model}. No `.onnx` file found.")
-
-        models = []
-        for model_file in [IMAGE_MODEL_FILE, TEXT_MODEL_FILE]:
-            path = os.path.join(MODEL_CACHE_DIR, "clip", model_file)
-            models.append(Clip._load_model(path, providers))
-
-        return models[0], models[1]
-
-    @staticmethod
-    def _load_model(path: str, providers: List[str]):
-        if os.path.exists(path):
-            return ort.InferenceSession(path, providers=providers)
-        else:
-            logger.warning(f"CLIP model file {path} not found.")
-            return None
-
-
-class ClipEmbedding:
-    """Embedding function for CLIP model."""
-
-    def __init__(
-        self,
-        model: str = "ViT-B/32",
-        silent: bool = False,
-        preferred_providers: List[str] = ["CPUExecutionProvider"],
-    ):
-        self.model_name = model
-        self.silent = silent
-        self.preferred_providers = preferred_providers
-        self.model_files = self._get_model_files()
-        self.model = None
-
-        self.downloader = ModelDownloader(
-            model_name="clip",
-            download_path=os.path.join(MODEL_CACHE_DIR, "clip"),
-            file_names=self.model_files,
-            download_func=self._download_model,
-            silent=self.silent,
-        )
-        self.downloader.ensure_model_files()
-
-    def _get_model_files(self):
-        if self.model_name == "ViT-B/32":
-            return ["clip_image_model_vitb32.onnx", "clip_text_model_vitb32.onnx"]
-        elif self.model_name == "RN50":
-            return ["clip_image_model_rn50.onnx", "clip_text_model_rn50.onnx"]
-        else:
-            raise ValueError(
-                f"Unexpected model {self.model_name}. No `.onnx` file found."
-            )
-
-    def _download_model(self, path: str):
-        s3_url = (
-            f"https://lakera-clip.s3.eu-west-1.amazonaws.com/{os.path.basename(path)}"
-        )
-        try:
-            ModelDownloader.download_from_url(s3_url, path, self.silent)
-            self.downloader.requestor.send_data(
-                UPDATE_MODEL_STATE,
-                {
-                    "model": f"{self.model_name}-{os.path.basename(path)}",
-                    "state": ModelStatusTypesEnum.downloaded,
-                },
-            )
-        except Exception:
-            self.downloader.requestor.send_data(
-                UPDATE_MODEL_STATE,
-                {
-                    "model": f"{self.model_name}-{os.path.basename(path)}",
-                    "state": ModelStatusTypesEnum.error,
-                },
-            )
-
-    def _load_model(self):
-        if self.model is None:
-            self.downloader.wait_for_download()
-            self.model = Clip(self.model_name, providers=self.preferred_providers)
-
-    def __call__(self, input: Union[List[str], List[Image.Image]]) -> List[np.ndarray]:
-        self._load_model()
-        if (
-            self.model is None
-            or self.model.image_model is None
-            or self.model.text_model is None
-        ):
-            logger.info(
-                "CLIP model is not fully loaded. Please wait for the download to complete."
-            )
-            return []
-
-        embeddings = []
-        for item in input:
-            if isinstance(item, Image.Image):
-                result = self.model.get_image_embeddings([item])
-                embeddings.append(result[0])
-            elif isinstance(item, str):
-                result = self.model.get_text_embeddings([item])
-                embeddings.append(result[0])
-            else:
-                raise ValueError(f"Unsupported input type: {type(item)}")
-        return embeddings
frigate/embeddings/functions/minilm_l6_v2.py (deleted)
@@ -1,107 +0,0 @@
-import logging
-import os
-from typing import List
-
-import numpy as np
-import onnxruntime as ort
-
-# importing this without pytorch or others causes a warning
-# https://github.com/huggingface/transformers/issues/27214
-# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
-from transformers import AutoTokenizer
-
-from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
-from frigate.types import ModelStatusTypesEnum
-from frigate.util.downloader import ModelDownloader
-
-logger = logging.getLogger(__name__)
-
-
-class MiniLMEmbedding:
-    """Embedding function for ONNX MiniLM-L6 model."""
-
-    DOWNLOAD_PATH = f"{MODEL_CACHE_DIR}/all-MiniLM-L6-v2"
-    MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
-    IMAGE_MODEL_FILE = "model.onnx"
-    TOKENIZER_FILE = "tokenizer"
-
-    def __init__(self, preferred_providers=["CPUExecutionProvider"]):
-        self.preferred_providers = preferred_providers
-        self.tokenizer = None
-        self.session = None
-
-        self.downloader = ModelDownloader(
-            model_name=self.MODEL_NAME,
-            download_path=self.DOWNLOAD_PATH,
-            file_names=[self.IMAGE_MODEL_FILE, self.TOKENIZER_FILE],
-            download_func=self._download_model,
-        )
-        self.downloader.ensure_model_files()
-
-    def _download_model(self, path: str):
-        try:
-            if os.path.basename(path) == self.IMAGE_MODEL_FILE:
-                s3_url = f"https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/{self.IMAGE_MODEL_FILE}"
-                ModelDownloader.download_from_url(s3_url, path)
-            elif os.path.basename(path) == self.TOKENIZER_FILE:
-                logger.info("Downloading MiniLM tokenizer")
-                tokenizer = AutoTokenizer.from_pretrained(
-                    self.MODEL_NAME, clean_up_tokenization_spaces=True
-                )
-                tokenizer.save_pretrained(path)
-
-            self.downloader.requestor.send_data(
-                UPDATE_MODEL_STATE,
-                {
-                    "model": f"{self.MODEL_NAME}-{os.path.basename(path)}",
-                    "state": ModelStatusTypesEnum.downloaded,
-                },
-            )
-        except Exception:
-            self.downloader.requestor.send_data(
-                UPDATE_MODEL_STATE,
-                {
-                    "model": f"{self.MODEL_NAME}-{os.path.basename(path)}",
-                    "state": ModelStatusTypesEnum.error,
-                },
-            )
-
-    def _load_model_and_tokenizer(self):
-        if self.tokenizer is None or self.session is None:
-            self.downloader.wait_for_download()
-            self.tokenizer = self._load_tokenizer()
-            self.session = self._load_model(
-                os.path.join(self.DOWNLOAD_PATH, self.IMAGE_MODEL_FILE),
-                self.preferred_providers,
-            )
-
-    def _load_tokenizer(self):
-        tokenizer_path = os.path.join(self.DOWNLOAD_PATH, self.TOKENIZER_FILE)
-        return AutoTokenizer.from_pretrained(
-            tokenizer_path, clean_up_tokenization_spaces=True
-        )
-
-    def _load_model(self, path: str, providers: List[str]):
-        if os.path.exists(path):
-            return ort.InferenceSession(path, providers=providers)
-        else:
-            logger.warning(f"MiniLM model file {path} not found.")
-            return None
-
-    def __call__(self, texts: List[str]) -> List[np.ndarray]:
-        self._load_model_and_tokenizer()
-
-        if self.session is None or self.tokenizer is None:
-            logger.error("MiniLM model or tokenizer is not loaded.")
-            return []
-
-        inputs = self.tokenizer(
-            texts, padding=True, truncation=True, return_tensors="np"
-        )
-        input_names = [input.name for input in self.session.get_inputs()]
-        onnx_inputs = {name: inputs[name] for name in input_names if name in inputs}
-
-        outputs = self.session.run(None, onnx_inputs)
-        embeddings = outputs[0].mean(axis=1)
-
-        return [embedding for embedding in embeddings]
frigate/embeddings/functions/onnx.py (new file, 216 additions)
@@ -0,0 +1,216 @@
+import logging
+import os
+import warnings
+from enum import Enum
+from io import BytesIO
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+import requests
+from PIL import Image
+
+# importing this without pytorch or others causes a warning
+# https://github.com/huggingface/transformers/issues/27214
+# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
+from transformers import AutoFeatureExtractor, AutoTokenizer
+from transformers.utils.logging import disable_progress_bar
+
+from frigate.comms.inter_process import InterProcessRequestor
+from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
+from frigate.types import ModelStatusTypesEnum
+from frigate.util.downloader import ModelDownloader
+from frigate.util.model import ONNXModelRunner
+
+warnings.filterwarnings(
+    "ignore",
+    category=FutureWarning,
+    message="The class CLIPFeatureExtractor is deprecated",
+)
+
+# disables the progress bar for downloading tokenizers and feature extractors
+disable_progress_bar()
+logger = logging.getLogger(__name__)
+
+
+class ModelTypeEnum(str, Enum):
+    face = "face"
+    vision = "vision"
+    text = "text"
+
+
+class GenericONNXEmbedding:
+    """Generic embedding function for ONNX models (text and vision)."""
+
+    def __init__(
+        self,
+        model_name: str,
+        model_file: str,
+        download_urls: Dict[str, str],
+        model_size: str,
+        model_type: str,
+        requestor: InterProcessRequestor,
+        tokenizer_file: Optional[str] = None,
+        device: str = "AUTO",
+    ):
+        self.model_name = model_name
+        self.model_file = model_file
+        self.tokenizer_file = tokenizer_file
+        self.requestor = requestor
+        self.download_urls = download_urls
+        self.model_type = model_type  # 'text' or 'vision'
+        self.model_size = model_size
+        self.device = device
+        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
+        self.tokenizer = None
+        self.feature_extractor = None
+        self.runner = None
+        files_names = list(self.download_urls.keys()) + (
+            [self.tokenizer_file] if self.tokenizer_file else []
+        )
+
+        if not all(
+            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
+        ):
+            logger.debug(f"starting model download for {self.model_name}")
+            self.downloader = ModelDownloader(
+                model_name=self.model_name,
+                download_path=self.download_path,
+                file_names=files_names,
+                download_func=self._download_model,
+            )
+            self.downloader.ensure_model_files()
+        else:
+            self.downloader = None
+            ModelDownloader.mark_files_state(
+                self.requestor,
+                self.model_name,
+                files_names,
+                ModelStatusTypesEnum.downloaded,
+            )
+            self._load_model_and_tokenizer()
+            logger.debug(f"models are already downloaded for {self.model_name}")
+
+    def _download_model(self, path: str):
+        try:
+            file_name = os.path.basename(path)
+            if file_name in self.download_urls:
+                ModelDownloader.download_from_url(self.download_urls[file_name], path)
+            elif (
+                file_name == self.tokenizer_file
+                and self.model_type == ModelTypeEnum.text
+            ):
+                if not os.path.exists(path + "/" + self.model_name):
+                    logger.info(f"Downloading {self.model_name} tokenizer")
+                    tokenizer = AutoTokenizer.from_pretrained(
+                        self.model_name,
+                        trust_remote_code=True,
+                        cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer",
+                        clean_up_tokenization_spaces=True,
+                    )
+                    tokenizer.save_pretrained(path)
+
+            self.downloader.requestor.send_data(
+                UPDATE_MODEL_STATE,
+                {
+                    "model": f"{self.model_name}-{file_name}",
+                    "state": ModelStatusTypesEnum.downloaded,
+                },
+            )
+        except Exception:
+            self.downloader.requestor.send_data(
+                UPDATE_MODEL_STATE,
+                {
+                    "model": f"{self.model_name}-{file_name}",
+                    "state": ModelStatusTypesEnum.error,
+                },
+            )
+
+    def _load_model_and_tokenizer(self):
+        if self.runner is None:
+            if self.downloader:
+                self.downloader.wait_for_download()
+            if self.model_type == ModelTypeEnum.text:
+                self.tokenizer = self._load_tokenizer()
+            else:
+                self.feature_extractor = self._load_feature_extractor()
+            self.runner = ONNXModelRunner(
+                os.path.join(self.download_path, self.model_file),
+                self.device,
+                self.model_size,
+            )
+
+    def _load_tokenizer(self):
+        tokenizer_path = os.path.join(f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer")
+        return AutoTokenizer.from_pretrained(
+            self.model_name,
+            cache_dir=tokenizer_path,
+            trust_remote_code=True,
+            clean_up_tokenization_spaces=True,
+        )
+
+    def _load_feature_extractor(self):
+        return AutoFeatureExtractor.from_pretrained(
+            f"{MODEL_CACHE_DIR}/{self.model_name}",
+        )
+
+    def _preprocess_inputs(self, raw_inputs: any) -> any:
+        if self.model_type == ModelTypeEnum.text:
+            max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
+            return [
+                self.tokenizer(
+                    text,
+                    padding="max_length",
+                    truncation=True,
+                    max_length=max_length,
+                    return_tensors="np",
+                )
+                for text in raw_inputs
+            ]
+        elif self.model_type == ModelTypeEnum.vision:
+            processed_images = [self._process_image(img) for img in raw_inputs]
+            return [
+                self.feature_extractor(images=image, return_tensors="np")
+                for image in processed_images
+            ]
+        else:
+            raise ValueError(f"Unable to preprocess inputs for {self.model_type}")
+
+    def _process_image(self, image):
+        if isinstance(image, str):
+            if image.startswith("http"):
+                response = requests.get(image)
+                image = Image.open(BytesIO(response.content)).convert("RGB")
+        elif isinstance(image, bytes):
+            image = Image.open(BytesIO(image)).convert("RGB")
+
+        return image
+
+    def __call__(
+        self, inputs: Union[List[str], List[Image.Image], List[str]]
+    ) -> List[np.ndarray]:
+        self._load_model_and_tokenizer()
+        if self.runner is None or (
+            self.tokenizer is None and self.feature_extractor is None
+        ):
+            logger.error(
+                f"{self.model_name} model or tokenizer/feature extractor is not loaded."
+            )
+            return []
+
+        processed_inputs = self._preprocess_inputs(inputs)
+        input_names = self.runner.get_input_names()
+        onnx_inputs = {name: [] for name in input_names}
+        input: dict[str, any]
+        for input in processed_inputs:
+            for key, value in input.items():
+                if key in input_names:
+                    onnx_inputs[key].append(value[0])
+
+        for key in input_names:
+            if onnx_inputs.get(key):
+                onnx_inputs[key] = np.stack(onnx_inputs[key])
+            else:
+                logger.warning(f"Expected input '{key}' not found in onnx_inputs")
+
+        embeddings = self.runner.run(onnx_inputs)[0]
+        return [embedding for embedding in embeddings]
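Tying the new module together, a caller wires it up exactly as the Embeddings constructor earlier in this diff does. A sketch reusing those constructor arguments; it assumes a running Frigate environment (model cache directory, inter-process requestor):

    from frigate.comms.inter_process import InterProcessRequestor
    from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum

    requestor = InterProcessRequestor()

    text_embedding = GenericONNXEmbedding(
        model_name="jinaai/jina-clip-v1",
        model_file="text_model_fp16.onnx",
        tokenizer_file="tokenizer",
        download_urls={
            "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
        },
        model_size="small",
        model_type=ModelTypeEnum.text,
        requestor=requestor,
        device="CPU",
    )

    # __call__ tokenizes, runs the ONNX session, and returns one ndarray per input
    vectors = text_embedding(["person walking a dog"])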
@@ -12,6 +12,7 @@ import numpy as np
 from peewee import DoesNotExist
 from playhouse.sqliteq import SqliteQueueDatabase
 
+from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder
 from frigate.comms.event_metadata_updater import (
     EventMetadataSubscriber,
     EventMetadataTypeEnum,
@@ -23,12 +24,16 @@ from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
 from frigate.events.types import EventTypeEnum
 from frigate.genai import get_genai_client
 from frigate.models import Event
+from frigate.types import TrackedObjectUpdateTypesEnum
+from frigate.util.builtin import serialize
 from frigate.util.image import SharedMemoryFrameManager, calculate_region
 
 from .embeddings import Embeddings
 
 logger = logging.getLogger(__name__)
 
+MAX_THUMBNAILS = 10
+
 
 class EmbeddingMaintainer(threading.Thread):
     """Handle embedding queue and post event updates."""
@@ -39,25 +44,31 @@ class EmbeddingMaintainer(threading.Thread):
         config: FrigateConfig,
         stop_event: MpEvent,
     ) -> None:
-        threading.Thread.__init__(self)
-        self.name = "embeddings_maintainer"
+        super().__init__(name="embeddings_maintainer")
         self.config = config
-        self.embeddings = Embeddings(db)
+        self.embeddings = Embeddings(config.semantic_search, db)
+
+        # Check if we need to re-index events
+        if config.semantic_search.reindex:
+            self.embeddings.reindex()
+
         self.event_subscriber = EventUpdateSubscriber()
         self.event_end_subscriber = EventEndSubscriber()
         self.event_metadata_subscriber = EventMetadataSubscriber(
             EventMetadataTypeEnum.regenerate_description
         )
+        self.embeddings_responder = EmbeddingsResponder()
         self.frame_manager = SharedMemoryFrameManager()
         # create communication for updating event descriptions
         self.requestor = InterProcessRequestor()
         self.stop_event = stop_event
         self.tracked_events = {}
-        self.genai_client = get_genai_client(config.genai)
+        self.genai_client = get_genai_client(config)
 
     def run(self) -> None:
         """Maintain a SQLite-vec database for semantic search."""
         while not self.stop_event.is_set():
+            self._process_requests()
             self._process_updates()
             self._process_finalized()
             self._process_event_metadata()
@@ -65,41 +76,86 @@ class EmbeddingMaintainer(threading.Thread):
         self.event_subscriber.stop()
         self.event_end_subscriber.stop()
         self.event_metadata_subscriber.stop()
+        self.embeddings_responder.stop()
         self.requestor.stop()
         logger.info("Exiting embeddings maintenance...")
 
+    def _process_requests(self) -> None:
+        """Process embeddings requests"""
+
+        def _handle_request(topic: str, data: str) -> str:
+            try:
+                if topic == EmbeddingsRequestEnum.embed_description.value:
+                    return serialize(
+                        self.embeddings.embed_description(
+                            data["id"], data["description"]
+                        ),
+                        pack=False,
+                    )
+                elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
+                    thumbnail = base64.b64decode(data["thumbnail"])
+                    return serialize(
+                        self.embeddings.embed_thumbnail(data["id"], thumbnail),
+                        pack=False,
+                    )
+                elif topic == EmbeddingsRequestEnum.generate_search.value:
+                    return serialize(
+                        self.embeddings.text_embedding([data])[0], pack=False
+                    )
+            except Exception as e:
+                logger.error(f"Unable to handle embeddings request {e}")
+
+        self.embeddings_responder.check_for_request(_handle_request)
+
     def _process_updates(self) -> None:
         """Process event updates"""
-        update = self.event_subscriber.check_for_update()
+        update = self.event_subscriber.check_for_update(timeout=0.1)
 
         if update is None:
             return
 
-        source_type, _, camera, data = update
+        source_type, _, camera, frame_name, data = update
 
         if not camera or source_type != EventTypeEnum.tracked_object:
             return
 
         camera_config = self.config.cameras[camera]
+        # no need to save our own thumbnails if genai is not enabled
+        # or if the object has become stationary
+        if (
+            not camera_config.genai.enabled
+            or self.genai_client is None
+            or data["stationary"]
+        ):
+            return
+
         if data["id"] not in self.tracked_events:
             self.tracked_events[data["id"]] = []
 
         # Create our own thumbnail based on the bounding box and the frame time
         try:
-            frame_id = f"{camera}{data['frame_time']}"
-            yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
+            yuv_frame = self.frame_manager.get(
+                frame_name, camera_config.frame_shape_yuv
+            )
 
             if yuv_frame is not None:
                 data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
+
+                # Limit the number of thumbnails saved
+                if len(self.tracked_events[data["id"]]) >= MAX_THUMBNAILS:
+                    # Always keep the first thumbnail for the event
+                    self.tracked_events[data["id"]].pop(1)
+
                 self.tracked_events[data["id"]].append(data)
-                self.frame_manager.close(frame_id)
+
+            self.frame_manager.close(frame_name)
         except FileNotFoundError:
             pass
 
     def _process_finalized(self) -> None:
         """Process the end of an event."""
         while True:
-            ended = self.event_end_subscriber.check_for_update()
+            ended = self.event_end_subscriber.check_for_update(timeout=0.1)
 
             if ended == None:
                 break
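The MAX_THUMBNAILS bookkeeping above keeps a bounded window of candidate thumbnails per tracked object while always preserving the very first one (`pop(1)` drops the second-oldest entry, never index 0). The behavior in isolation:

    MAX_THUMBNAILS = 10

    def append_capped(thumbs: list, item) -> None:
        """Append while keeping at most MAX_THUMBNAILS entries; index 0 is never evicted."""
        if len(thumbs) >= MAX_THUMBNAILS:
            thumbs.pop(1)  # drop the second-oldest entry
        thumbs.append(item)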
@@ -136,9 +192,6 @@ class EmbeddingMaintainer(threading.Thread):
                 or set(event.zones) & set(camera_config.genai.required_zones)
             )
         ):
-            logger.debug(
-                f"Description generation for {event}, has_snapshot: {event.has_snapshot}"
-            )
             if event.has_snapshot and camera_config.genai.use_snapshot:
                 with open(
                     os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
@@ -192,7 +245,7 @@ class EmbeddingMaintainer(threading.Thread):
     def _process_event_metadata(self):
         # Check for regenerate description requests
         (topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
-            timeout=1
+            timeout=0.1
         )
 
         if topic is None:
@@ -219,14 +272,14 @@ class EmbeddingMaintainer(threading.Thread):
 
     def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
         """Embed the thumbnail for an event."""
-        self.embeddings.upsert_thumbnail(event_id, thumbnail)
+        self.embeddings.embed_thumbnail(event_id, thumbnail)
 
     def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
         """Embed the description for an event."""
         camera_config = self.config.cameras[event.camera]
 
         description = self.genai_client.generate_description(
-            camera_config, thumbnails, event.label
+            camera_config, thumbnails, event
         )
 
         if not description:
@@ -236,11 +289,15 @@ class EmbeddingMaintainer(threading.Thread):
         # fire and forget description update
         self.requestor.send_data(
             UPDATE_EVENT_DESCRIPTION,
-            {"id": event.id, "description": description},
+            {
+                "type": TrackedObjectUpdateTypesEnum.description,
+                "id": event.id,
+                "description": description,
+            },
         )
 
-        # Encode the description
-        self.embeddings.upsert_description(event.id, description)
+        # Embed the description
+        self.embeddings.embed_description(event.id, description)
 
         logger.debug(
             "Generated description for %s (%d images): %s",
@@ -20,9 +20,10 @@ class ZScoreNormalization:
 
     @property
     def stddev(self):
-        return math.sqrt(self.variance)
+        return math.sqrt(self.variance) if self.variance > 0 else 0.0
 
-    def normalize(self, distances: list[float]):
-        self._update(distances)
+    def normalize(self, distances: list[float], save_stats: bool):
+        if save_stats:
+            self._update(distances)
         if self.stddev == 0:
             return distances
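Z-score normalization rescales raw vector distances to mean 0 / stddev 1 so that thumbnail and description scores are comparable, and the new guard passes distances through unchanged when there is no spread. The real class keeps running statistics across queries; this batch version just shows the math:

    import math

    def z_normalize(distances: list[float]) -> list[float]:
        """Rescale to mean 0 / stddev 1; return unchanged if variance is zero."""
        mean = sum(distances) / len(distances)
        variance = sum((d - mean) ** 2 for d in distances) / len(distances)
        stddev = math.sqrt(variance) if variance > 0 else 0.0
        if stddev == 0:
            return distances
        return [(d - mean) / stddev for d in distances]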
@@ -64,6 +64,8 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
 
 
 class AudioProcessor(util.Process):
+    name = "frigate.audio_manager"
+
     def __init__(
         self,
         cameras: list[CameraConfig],
@@ -8,11 +8,9 @@ from enum import Enum
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
 
-from playhouse.sqliteq import SqliteQueueDatabase
-
 from frigate.config import FrigateConfig
 from frigate.const import CLIPS_DIR
-from frigate.embeddings.embeddings import Embeddings
+from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event, Timeline
 
 logger = logging.getLogger(__name__)
@@ -23,9 +21,12 @@ class EventCleanupType(str, Enum):
     snapshots = "snapshots"
 
 
+CHUNK_SIZE = 50
+
+
 class EventCleanup(threading.Thread):
     def __init__(
-        self, config: FrigateConfig, stop_event: MpEvent, db: SqliteQueueDatabase
+        self, config: FrigateConfig, stop_event: MpEvent, db: SqliteVecQueueDatabase
     ):
         super().__init__(name="event_cleanup")
         self.config = config
@@ -35,9 +36,6 @@ class EventCleanup(threading.Thread):
         self.removed_camera_labels: list[str] = None
         self.camera_labels: dict[str, dict[str, any]] = {}
 
-        if self.config.semantic_search.enabled:
-            self.embeddings = Embeddings(self.db)
-
     def get_removed_camera_labels(self) -> list[Event]:
         """Get a list of distinct labels for removed cameras."""
         if self.removed_camera_labels is None:
@@ -112,6 +110,7 @@ class EventCleanup(threading.Thread):
             .namedtuples()
             .iterator()
         )
+        logger.debug(f"{len(expired_events)} events can be expired")
         # delete the media from disk
         for expired in expired_events:
             media_name = f"{expired.camera}-{expired.id}"
@@ -130,13 +129,34 @@ class EventCleanup(threading.Thread):
                 logger.warning(f"Unable to delete event images: {e}")
 
             # update the clips attribute for the db entry
-            update_query = Event.update(update_params).where(
+            query = Event.select(Event.id).where(
                 Event.camera.not_in(self.camera_keys),
                 Event.start_time < expire_after,
                 Event.label == event.label,
                 Event.retain_indefinitely == False,
             )
-            update_query.execute()
+
+            events_to_update = []
+
+            for batch in query.iterator():
+                events_to_update.extend([event.id for event in batch])
+                if len(events_to_update) >= CHUNK_SIZE:
+                    logger.debug(
+                        f"Updating {update_params} for {len(events_to_update)} events"
+                    )
+                    Event.update(update_params).where(
+                        Event.id << events_to_update
+                    ).execute()
+                    events_to_update = []
+
+            # Update any remaining events
+            if events_to_update:
+                logger.debug(
+                    f"Updating clips/snapshots attribute for {len(events_to_update)} events"
+                )
+                Event.update(update_params).where(
+                    Event.id << events_to_update
+                ).execute()
 
         events_to_update = []
 
@@ -201,7 +221,11 @@ class EventCleanup(threading.Thread):
                 logger.warning(f"Unable to delete event images: {e}")
 
         # update the clips attribute for the db entry
-        Event.update(update_params).where(Event.id << events_to_update).execute()
+        for i in range(0, len(events_to_update), CHUNK_SIZE):
+            batch = events_to_update[i : i + CHUNK_SIZE]
+            logger.debug(f"Updating {update_params} for {len(batch)} events")
+            Event.update(update_params).where(Event.id << batch).execute()
 
         return events_to_update
 
     def run(self) -> None:
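Both cleanup paths now cap each UPDATE at CHUNK_SIZE ids, keeping SQLite parameter lists small. The generic shape of that loop, written against peewee as in the diff (`model.id << batch` is peewee's IN operator):

    CHUNK_SIZE = 50

    def chunked_update(model, update_params: dict, ids: list) -> None:
        """Apply an UPDATE in bounded batches so parameter lists stay small."""
        for i in range(0, len(ids), CHUNK_SIZE):
            batch = ids[i : i + CHUNK_SIZE]
            model.update(update_params).where(model.id << batch).execute()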
@@ -227,15 +251,16 @@ class EventCleanup(threading.Thread):
                 .iterator()
             )
             events_to_delete = [e.id for e in events]
+            logger.debug(f"Found {len(events_to_delete)} events that can be expired")
             if len(events_to_delete) > 0:
-                chunk_size = 50
-                for i in range(0, len(events_to_delete), chunk_size):
-                    chunk = events_to_delete[i : i + chunk_size]
+                for i in range(0, len(events_to_delete), CHUNK_SIZE):
+                    chunk = events_to_delete[i : i + CHUNK_SIZE]
+                    logger.debug(f"Deleting {len(chunk)} events from the database")
                     Event.delete().where(Event.id << chunk).execute()
 
                     if self.config.semantic_search.enabled:
-                        self.embeddings.delete_description(chunk)
-                        self.embeddings.delete_thumbnail(chunk)
+                        self.db.delete_embeddings_description(event_ids=chunk)
+                        self.db.delete_embeddings_thumbnail(event_ids=chunk)
                         logger.debug(f"Deleted {len(events_to_delete)} embeddings")
 
         logger.info("Exiting event cleanup...")
@@ -70,7 +70,7 @@ class ExternalEventProcessor:
             "sub_label": sub_label,
             "score": score,
             "camera": camera,
-            "start_time": now,
+            "start_time": now - camera_config.record.event_pre_capture,
             "end_time": end,
             "thumbnail": thumbnail,
             "has_clip": camera_config.record.enabled and include_recording,
@@ -75,7 +75,7 @@ class EventProcessor(threading.Thread):
             if update == None:
                 continue
 
-            source_type, event_type, camera, event_data = update
+            source_type, event_type, camera, _, event_data = update
 
             logger.debug(
                 f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
@@ -1,10 +1,16 @@
 """Generative AI module for Frigate."""
 
 import importlib
+import logging
 import os
 from typing import Optional
 
-from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
+from playhouse.shortcuts import model_to_dict
+
+from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum
+from frigate.models import Event
+
+logger = logging.getLogger(__name__)
 
 PROVIDERS = {}
 
@@ -31,12 +37,14 @@ class GenAIClient:
         self,
         camera_config: CameraConfig,
         thumbnails: list[bytes],
-        label: str,
+        event: Event,
     ) -> Optional[str]:
         """Generate a description for the frame."""
         prompt = camera_config.genai.object_prompts.get(
-            label, camera_config.genai.prompt
-        )
+            event.label,
+            camera_config.genai.prompt,
+        ).format(**model_to_dict(event))
+        logger.debug(f"Sending images to genai provider with prompt: {prompt}")
         return self._send(prompt, thumbnails)
 
     def _init_provider(self):
@@ -48,13 +56,19 @@ class GenAIClient:
         return None
 
 
-def get_genai_client(genai_config: GenAIConfig) -> Optional[GenAIClient]:
+def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
     """Get the GenAI client."""
-    if genai_config.enabled:
+    genai_config = config.genai
+    genai_cameras = [
+        c for c in config.cameras.values() if c.enabled and c.genai.enabled
+    ]
+
+    if genai_cameras:
         load_providers()
         provider = PROVIDERS.get(genai_config.provider)
         if provider:
             return provider(genai_config)
 
     return None
 
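The `generate_description` change above moves from passing a bare label string to passing the whole `Event` row, so user prompt templates can reference any event field. `playhouse.shortcuts.model_to_dict` flattens the Peewee row and `str.format(**...)` fills the placeholders. A minimal sketch of that substitution, with a plain dict standing in for the database row:

```python
# model_to_dict(event) would produce a dict like this from a Peewee row;
# a literal dict stands in here so the snippet runs on its own.
fields = {"label": "person", "camera": "front_door", "id": "1728-abc"}

template = "Describe the {label} seen at the {camera} camera."
prompt = template.format(**fields)
print(prompt)  # Describe the person seen at the front_door camera.
```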
@@ -21,12 +21,20 @@ class OllamaClient(GenAIClient):
 
     def _init_provider(self):
         """Initialize the client."""
+        try:
             client = ApiClient(host=self.genai_config.base_url, timeout=self.timeout)
-        response = client.pull(self.genai_config.model)
-        if response["status"] != "success":
-            logger.error("Failed to pull %s model from Ollama", self.genai_config.model)
+            # ensure the model is available locally
+            response = client.show(self.genai_config.model)
+            if response.get("error"):
+                logger.error(
+                    "Ollama error: %s",
+                    response["error"],
+                )
                return None
            return client
+        except Exception as e:
+            logger.warning("Error initializing Ollama: %s", str(e))
+            return None
 
     def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
         """Submit a request to Ollama"""
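This hunk swaps the eager `pull` (which downloads the model on every startup) for a `show` call that only verifies the model already exists locally, and wraps initialization so an unreachable server degrades to a logged warning instead of a crash. A hedged sketch of the same probe using the `ollama` Python package; note that current releases of that package signal a missing model by raising `ollama.ResponseError` rather than returning an `error` field, so the broad except below covers both styles:

```python
import ollama


def init_client(base_url: str, model: str) -> ollama.Client | None:
    try:
        client = ollama.Client(host=base_url, timeout=60)
        client.show(model)  # cheap metadata lookup; fails if the model is absent
        return client
    except Exception as exc:
        print(f"Ollama unavailable or missing {model}: {exc}")
        return None
```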
@@ -93,7 +93,7 @@ class ReviewSegment(Model):  # type: ignore[misc]
    start_time = DateTimeField()
    end_time = DateTimeField()
    has_been_reviewed = BooleanField(default=False)
-    severity = CharField(max_length=30)  # alert, detection, significant_motion
+    severity = CharField(max_length=30)  # alert, detection
    thumb_path = CharField(unique=True)
    data = JSONField()  # additional data about detection like list of labels, zone, areas of significant motion
 
@@ -59,3 +59,7 @@ ignore_errors = false
 [mypy-frigate.watchdog]
 ignore_errors = false
 disallow_untyped_calls = false
+
+
+[mypy-frigate.service_manager.*]
+ignore_errors = false
@@ -12,10 +12,14 @@ from setproctitle import setproctitle
 
 import frigate.util as util
 from frigate.detectors import create_detector
-from frigate.detectors.detector_config import BaseDetectorConfig, InputTensorEnum
+from frigate.detectors.detector_config import (
+    BaseDetectorConfig,
+    InputDTypeEnum,
+    InputTensorEnum,
+)
 from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
 from frigate.util.builtin import EventsPerSecond, load_labels
-from frigate.util.image import SharedMemoryFrameManager
+from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
 from frigate.util.services import listen
 
 logger = logging.getLogger(__name__)
@@ -55,12 +59,15 @@ class LocalObjectDetector(ObjectDetector):
            self.input_transform = tensor_transform(
                detector_config.model.input_tensor
            )
+
+            self.dtype = detector_config.model.input_dtype
        else:
            self.input_transform = None
+            self.dtype = InputDTypeEnum.int
 
        self.detect_api = create_detector(detector_config)
 
-    def detect(self, tensor_input, threshold=0.4):
+    def detect(self, tensor_input: np.ndarray, threshold=0.4):
        detections = []
 
        raw_detections = self.detect_raw(tensor_input)
@@ -77,9 +84,14 @@ class LocalObjectDetector(ObjectDetector):
        self.fps.update()
        return detections
 
-    def detect_raw(self, tensor_input):
+    def detect_raw(self, tensor_input: np.ndarray):
        if self.input_transform:
            tensor_input = np.transpose(tensor_input, self.input_transform)
 
+        if self.dtype == InputDTypeEnum.float:
+            tensor_input = tensor_input.astype(np.float32)
+            tensor_input /= 255
+
        return self.detect_api.detect_raw(tensor_input=tensor_input)
 
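The `InputDTypeEnum` branch above lets one detection pipeline feed both quantized (uint8) and float models: when a detector declares a float input, pixels are converted to `float32` and rescaled from [0, 255] to [0, 1] just before inference. A self-contained sketch of that normalization step (the enum mirrors the diff; the frame is synthetic):

```python
from enum import Enum

import numpy as np


class InputDTypeEnum(Enum):
    int = "int"
    float = "float"


def prepare(tensor_input: np.ndarray, dtype: InputDTypeEnum) -> np.ndarray:
    if dtype == InputDTypeEnum.float:
        tensor_input = tensor_input.astype(np.float32)
        tensor_input /= 255  # in-place scale; avoids a second temporary array
    return tensor_input


frame = np.random.randint(0, 255, (1, 320, 320, 3), dtype=np.uint8)
print(prepare(frame, InputDTypeEnum.float).dtype)  # float32
```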
@@ -110,7 +122,7 @@ def run_detector(
 
    outputs = {}
    for name in out_events.keys():
-        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
+        out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        outputs[name] = {"shm": out_shm, "np": out_np}
 
@@ -200,15 +212,13 @@ class RemoteObjectDetector:
        self.detection_queue = detection_queue
        self.event = event
        self.stop_event = stop_event
-        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
+        self.shm = UntrackedSharedMemory(name=self.name, create=False)
        self.np_shm = np.ndarray(
            (1, model_config.height, model_config.width, 3),
            dtype=np.uint8,
            buffer=self.shm.buf,
        )
-        self.out_shm = mp.shared_memory.SharedMemory(
-            name=f"out-{self.name}", create=False
-        )
+        self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False)
        self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
 
    def detect(self, tensor_input, threshold=0.4):
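`UntrackedSharedMemory` works around a long-standing CPython wart: `multiprocessing.shared_memory.SharedMemory` registers every segment it attaches to with the per-process resource tracker, which can then unlink segments the process never owned and emit spurious leak warnings on exit. A common workaround, and roughly what a wrapper like this does, is to unregister after attaching; the sketch below is an illustration under that assumption, not Frigate's exact implementation:

```python
from multiprocessing import resource_tracker, shared_memory


class UntrackedSharedMemory(shared_memory.SharedMemory):
    """SharedMemory that opts attached segments out of the resource tracker."""

    def __init__(self, name: str, create: bool = False, size: int = 0):
        super().__init__(name=name, create=create, size=size)
        if not create:
            # we only attached; the creating process remains responsible for
            # unlinking, so this process should not track the segment
            try:
                resource_tracker.unregister(self._name, "shared_memory")
            except Exception:
                pass  # tracker internals vary across Python versions
```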
@@ -1,4 +1,3 @@
-import base64
 import datetime
 import json
 import logging
@@ -7,7 +6,6 @@ import queue
 import threading
 from collections import Counter, defaultdict
 from multiprocessing.synchronize import Event as MpEvent
-from statistics import median
 from typing import Callable
 
 import cv2
@@ -18,7 +16,6 @@ from frigate.comms.dispatcher import Dispatcher
 from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import (
-    CameraConfig,
     FrigateConfig,
     MqttConfig,
     RecordConfig,
@@ -28,458 +25,18 @@ from frigate.config import (
 from frigate.const import CLIPS_DIR, UPDATE_CAMERA_ACTIVITY
 from frigate.events.types import EventStateEnum, EventTypeEnum
 from frigate.ptz.autotrack import PtzAutoTrackerThread
+from frigate.track.tracked_object import TrackedObject
 from frigate.util.image import (
     SharedMemoryFrameManager,
-    area,
-    calculate_region,
     draw_box_with_label,
     draw_timestamp,
+    is_better_thumbnail,
     is_label_printable,
 )
 
 logger = logging.getLogger(__name__)
 
-
-def on_edge(box, frame_shape):
-    if (
-        box[0] == 0
-        or box[1] == 0
-        or box[2] == frame_shape[1] - 1
-        or box[3] == frame_shape[0] - 1
-    ):
-        return True
-
-
-def has_better_attr(current_thumb, new_obj, attr_label) -> bool:
-    max_new_attr = max(
-        [0]
-        + [area(a["box"]) for a in new_obj["attributes"] if a["label"] == attr_label]
-    )
-    max_current_attr = max(
-        [0]
-        + [
-            area(a["box"])
-            for a in current_thumb["attributes"]
-            if a["label"] == attr_label
-        ]
-    )
-
-    # if the thumb has a higher scoring attr
-    return max_new_attr > max_current_attr
-
-
-def is_better_thumbnail(label, current_thumb, new_obj, frame_shape) -> bool:
-    # larger is better
-    # cutoff images are less ideal, but they should also be smaller?
-    # better scores are obviously better too
-
-    # check face on person
-    if label == "person":
-        if has_better_attr(current_thumb, new_obj, "face"):
-            return True
-        # if the current thumb has a face attr, dont update unless it gets better
-        if any([a["label"] == "face" for a in current_thumb["attributes"]]):
-            return False
-
-    # check license_plate on car
-    if label == "car":
-        if has_better_attr(current_thumb, new_obj, "license_plate"):
-            return True
-        # if the current thumb has a license_plate attr, dont update unless it gets better
-        if any([a["label"] == "license_plate" for a in current_thumb["attributes"]]):
-            return False
-
-    # if the new_thumb is on an edge, and the current thumb is not
-    if on_edge(new_obj["box"], frame_shape) and not on_edge(
-        current_thumb["box"], frame_shape
-    ):
-        return False
-
-    # if the score is better by more than 5%
-    if new_obj["score"] > current_thumb["score"] + 0.05:
-        return True
-
-    # if the area is 10% larger
-    if new_obj["area"] > current_thumb["area"] * 1.1:
-        return True
-
-    return False
-
-
-class TrackedObject:
-    def __init__(
-        self,
-        camera,
-        colormap,
-        camera_config: CameraConfig,
-        frame_cache,
-        obj_data: dict[str, any],
-    ):
-        # set the score history then remove as it is not part of object state
-        self.score_history = obj_data["score_history"]
-        del obj_data["score_history"]
-
-        self.obj_data = obj_data
-        self.camera = camera
-        self.colormap = colormap
-        self.camera_config = camera_config
-        self.frame_cache = frame_cache
-        self.zone_presence: dict[str, int] = {}
-        self.zone_loitering: dict[str, int] = {}
-        self.current_zones = []
-        self.entered_zones = []
-        self.attributes = defaultdict(float)
-        self.false_positive = True
-        self.has_clip = False
-        self.has_snapshot = False
-        self.top_score = self.computed_score = 0.0
-        self.thumbnail_data = None
-        self.last_updated = 0
-        self.last_published = 0
-        self.frame = None
-        self.active = True
-        self.previous = self.to_dict()
-
-    def _is_false_positive(self):
-        # once a true positive, always a true positive
-        if not self.false_positive:
-            return False
-
-        threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold
-        return self.computed_score < threshold
-
-    def compute_score(self):
-        """get median of scores for object."""
-        return median(self.score_history)
-
-    def update(self, current_frame_time: float, obj_data, has_valid_frame: bool):
-        thumb_update = False
-        significant_change = False
-        autotracker_update = False
-        # if the object is not in the current frame, add a 0.0 to the score history
-        if obj_data["frame_time"] != current_frame_time:
-            self.score_history.append(0.0)
-        else:
-            self.score_history.append(obj_data["score"])
-
-        # only keep the last 10 scores
-        if len(self.score_history) > 10:
-            self.score_history = self.score_history[-10:]
-
-        # calculate if this is a false positive
-        self.computed_score = self.compute_score()
-        if self.computed_score > self.top_score:
-            self.top_score = self.computed_score
-        self.false_positive = self._is_false_positive()
-        self.active = self.is_active()
-
-        if not self.false_positive and has_valid_frame:
-            # determine if this frame is a better thumbnail
-            if self.thumbnail_data is None or is_better_thumbnail(
-                self.obj_data["label"],
-                self.thumbnail_data,
-                obj_data,
-                self.camera_config.frame_shape,
-            ):
-                self.thumbnail_data = {
-                    "frame_time": current_frame_time,
-                    "box": obj_data["box"],
-                    "area": obj_data["area"],
-                    "region": obj_data["region"],
-                    "score": obj_data["score"],
-                    "attributes": obj_data["attributes"],
-                }
-                thumb_update = True
-
-        # check zones
-        current_zones = []
-        bottom_center = (obj_data["centroid"][0], obj_data["box"][3])
-        # check each zone
-        for name, zone in self.camera_config.zones.items():
-            # if the zone is not for this object type, skip
-            if len(zone.objects) > 0 and obj_data["label"] not in zone.objects:
-                continue
-            contour = zone.contour
-            zone_score = self.zone_presence.get(name, 0) + 1
-            # check if the object is in the zone
-            if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
-                # if the object passed the filters once, dont apply again
-                if name in self.current_zones or not zone_filtered(self, zone.filters):
-                    # an object is only considered present in a zone if it has a zone inertia of 3+
-                    if zone_score >= zone.inertia:
-                        loitering_score = self.zone_loitering.get(name, 0) + 1
-
-                        # loitering time is configured as seconds, convert to count of frames
-                        if loitering_score >= (
-                            self.camera_config.zones[name].loitering_time
-                            * self.camera_config.detect.fps
-                        ):
-                            current_zones.append(name)
-
-                            if name not in self.entered_zones:
-                                self.entered_zones.append(name)
-                        else:
-                            self.zone_loitering[name] = loitering_score
-                    else:
-                        self.zone_presence[name] = zone_score
-            else:
-                # once an object has a zone inertia of 3+ it is not checked anymore
-                if 0 < zone_score < zone.inertia:
-                    self.zone_presence[name] = zone_score - 1
-
-        # maintain attributes
-        for attr in obj_data["attributes"]:
-            if self.attributes[attr["label"]] < attr["score"]:
-                self.attributes[attr["label"]] = attr["score"]
-
-        # populate the sub_label for object with highest scoring logo
-        if self.obj_data["label"] in ["car", "package", "person"]:
-            recognized_logos = {
-                k: self.attributes[k]
-                for k in ["ups", "fedex", "amazon"]
-                if k in self.attributes
-            }
-            if len(recognized_logos) > 0:
-                max_logo = max(recognized_logos, key=recognized_logos.get)
-
-                # don't overwrite sub label if it is already set
-                if (
-                    self.obj_data.get("sub_label") is None
-                    or self.obj_data["sub_label"][0] == max_logo
-                ):
-                    self.obj_data["sub_label"] = (max_logo, recognized_logos[max_logo])
-
-        # check for significant change
-        if not self.false_positive:
-            # if the zones changed, signal an update
-            if set(self.current_zones) != set(current_zones):
-                significant_change = True
-
-            # if the position changed, signal an update
-            if self.obj_data["position_changes"] != obj_data["position_changes"]:
-                significant_change = True
-
-            if self.obj_data["attributes"] != obj_data["attributes"]:
-                significant_change = True
-
-            # if the state changed between stationary and active
-            if self.previous["active"] != self.active:
-                significant_change = True
-
-            # update at least once per minute
-            if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
-                significant_change = True
-
-            # update autotrack at most 3 objects per second
-            if self.obj_data["frame_time"] - self.previous["frame_time"] >= (1 / 3):
-                autotracker_update = True
-
-        self.obj_data.update(obj_data)
-        self.current_zones = current_zones
-        return (thumb_update, significant_change, autotracker_update)
-
-    def to_dict(self, include_thumbnail: bool = False):
-        event = {
-            "id": self.obj_data["id"],
-            "camera": self.camera,
-            "frame_time": self.obj_data["frame_time"],
-            "snapshot": self.thumbnail_data,
-            "label": self.obj_data["label"],
-            "sub_label": self.obj_data.get("sub_label"),
-            "top_score": self.top_score,
-            "false_positive": self.false_positive,
-            "start_time": self.obj_data["start_time"],
-            "end_time": self.obj_data.get("end_time", None),
-            "score": self.obj_data["score"],
-            "box": self.obj_data["box"],
-            "area": self.obj_data["area"],
-            "ratio": self.obj_data["ratio"],
-            "region": self.obj_data["region"],
-            "active": self.active,
-            "stationary": not self.active,
-            "motionless_count": self.obj_data["motionless_count"],
-            "position_changes": self.obj_data["position_changes"],
-            "current_zones": self.current_zones.copy(),
-            "entered_zones": self.entered_zones.copy(),
-            "has_clip": self.has_clip,
-            "has_snapshot": self.has_snapshot,
-            "attributes": self.attributes,
-            "current_attributes": self.obj_data["attributes"],
-        }
-
-        if include_thumbnail:
-            event["thumbnail"] = base64.b64encode(self.get_thumbnail()).decode("utf-8")
-
-        return event
-
-    def is_active(self):
-        return not self.is_stationary()
-
-    def is_stationary(self):
-        return (
-            self.obj_data["motionless_count"]
-            > self.camera_config.detect.stationary.threshold
-        )
-
-    def get_thumbnail(self):
-        if (
-            self.thumbnail_data is None
-            or self.thumbnail_data["frame_time"] not in self.frame_cache
-        ):
-            ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
-
-        jpg_bytes = self.get_jpg_bytes(
-            timestamp=False, bounding_box=False, crop=True, height=175
-        )
-
-        if jpg_bytes:
-            return jpg_bytes
-        else:
-            ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
-            return jpg.tobytes()
-
-    def get_clean_png(self):
-        if self.thumbnail_data is None:
-            return None
-
-        try:
-            best_frame = cv2.cvtColor(
-                self.frame_cache[self.thumbnail_data["frame_time"]],
-                cv2.COLOR_YUV2BGR_I420,
-            )
-        except KeyError:
-            logger.warning(
-                f"Unable to create clean png because frame {self.thumbnail_data['frame_time']} is not in the cache"
-            )
-            return None
-
-        ret, png = cv2.imencode(".png", best_frame)
-        if ret:
-            return png.tobytes()
-        else:
-            return None
-
-    def get_jpg_bytes(
-        self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70
-    ):
-        if self.thumbnail_data is None:
-            return None
-
-        try:
-            best_frame = cv2.cvtColor(
-                self.frame_cache[self.thumbnail_data["frame_time"]],
-                cv2.COLOR_YUV2BGR_I420,
-            )
-        except KeyError:
-            logger.warning(
-                f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
-            )
-            return None
-
-        if bounding_box:
-            thickness = 2
-            color = self.colormap[self.obj_data["label"]]
-
-            # draw the bounding boxes on the frame
-            box = self.thumbnail_data["box"]
-            draw_box_with_label(
-                best_frame,
-                box[0],
-                box[1],
-                box[2],
-                box[3],
-                self.obj_data["label"],
-                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
-                thickness=thickness,
-                color=color,
-            )
-
-            # draw any attributes
-            for attribute in self.thumbnail_data["attributes"]:
-                box = attribute["box"]
-                draw_box_with_label(
-                    best_frame,
-                    box[0],
-                    box[1],
-                    box[2],
-                    box[3],
-                    attribute["label"],
-                    f"{attribute['score']:.0%}",
-                    thickness=thickness,
-                    color=color,
-                )
-
-        if crop:
-            box = self.thumbnail_data["box"]
-            box_size = 300
-            region = calculate_region(
-                best_frame.shape,
-                box[0],
-                box[1],
-                box[2],
-                box[3],
-                box_size,
-                multiplier=1.1,
-            )
-            best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
-
-        if height:
-            width = int(height * best_frame.shape[1] / best_frame.shape[0])
-            best_frame = cv2.resize(
-                best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
-            )
-        if timestamp:
-            color = self.camera_config.timestamp_style.color
-            draw_timestamp(
-                best_frame,
-                self.thumbnail_data["frame_time"],
-                self.camera_config.timestamp_style.format,
-                font_effect=self.camera_config.timestamp_style.effect,
-                font_thickness=self.camera_config.timestamp_style.thickness,
-                font_color=(color.blue, color.green, color.red),
-                position=self.camera_config.timestamp_style.position,
-            )
-
-        ret, jpg = cv2.imencode(
-            ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality]
-        )
-        if ret:
-            return jpg.tobytes()
-        else:
-            return None
-
-
-def zone_filtered(obj: TrackedObject, object_config):
-    object_name = obj.obj_data["label"]
-
-    if object_name in object_config:
-        obj_settings = object_config[object_name]
-
-        # if the min area is larger than the
-        # detected object, don't add it to detected objects
-        if obj_settings.min_area > obj.obj_data["area"]:
-            return True
-
-        # if the detected object is larger than the
-        # max area, don't add it to detected objects
-        if obj_settings.max_area < obj.obj_data["area"]:
-            return True
-
-        # if the score is lower than the threshold, skip
-        if obj_settings.threshold > obj.computed_score:
-            return True
-
-        # if the object is not proportionally wide enough
-        if obj_settings.min_ratio > obj.obj_data["ratio"]:
-            return True
-
-        # if the object is proportionally too wide
-        if obj_settings.max_ratio < obj.obj_data["ratio"]:
-            return True
-
-    return False
-
-
 # Maintains the state of a camera
 class CameraState:
     def __init__(
@@ -676,17 +233,18 @@ class CameraState:
    def on(self, event_type: str, callback: Callable[[dict], None]):
        self.callbacks[event_type].append(callback)
 
-    def update(self, frame_time, current_detections, motion_boxes, regions):
-        # get the new frame
-        frame_id = f"{self.name}{frame_time}"
+    def update(
+        self,
+        frame_name: str,
+        frame_time: float,
+        current_detections: dict[str, dict[str, any]],
+        motion_boxes: list[tuple[int, int, int, int]],
+        regions: list[tuple[int, int, int, int]],
+    ):
        current_frame = self.frame_manager.get(
-            frame_id, self.camera_config.frame_shape_yuv
+            frame_name, self.camera_config.frame_shape_yuv
        )
 
-        if current_frame is None:
-            logger.debug(f"Failed to get frame {frame_id} from SHM")
-
        tracked_objects = self.tracked_objects.copy()
        current_ids = set(current_detections.keys())
        previous_ids = set(tracked_objects.keys())
@@ -696,8 +254,7 @@ class CameraState:
 
        for id in new_ids:
            new_obj = tracked_objects[id] = TrackedObject(
-                self.name,
-                self.config.model.colormap,
+                self.config.model,
                self.camera_config,
                self.frame_cache,
                current_detections[id],
@@ -705,7 +262,7 @@ class CameraState:
 
            # call event handlers
            for c in self.callbacks["start"]:
-                c(self.name, new_obj, frame_time)
+                c(self.name, new_obj, frame_name)
 
        for id in updated_ids:
            updated_obj = tracked_objects[id]
@@ -715,7 +272,7 @@ class CameraState:
 
            if autotracker_update or significant_update:
                for c in self.callbacks["autotrack"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
 
            if thumb_update and current_frame is not None:
                # ensure this frame is stored in the cache
@@ -736,7 +293,7 @@ class CameraState:
            ) or significant_update:
                # call event handlers
                for c in self.callbacks["update"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
                updated_obj.last_published = frame_time
 
        for id in removed_ids:
@@ -745,7 +302,7 @@ class CameraState:
            if "end_time" not in removed_obj.obj_data:
                removed_obj.obj_data["end_time"] = frame_time
                for c in self.callbacks["end"]:
-                    c(self.name, removed_obj, frame_time)
+                    c(self.name, removed_obj, frame_name)
 
        # TODO: can i switch to looking this up and only changing when an event ends?
        # maintain best objects
@@ -788,6 +345,7 @@ class CameraState:
                # if the object's thumbnail is not from the current frame, skip
                if (
                    current_frame is None
+                    or obj.thumbnail_data is None
                    or obj.false_positive
                    or obj.thumbnail_data["frame_time"] != frame_time
                ):
@@ -810,11 +368,11 @@ class CameraState:
                ):
                    self.best_objects[object_type] = obj
                    for c in self.callbacks["snapshot"]:
-                        c(self.name, self.best_objects[object_type], frame_time)
+                        c(self.name, self.best_objects[object_type], frame_name)
            else:
                self.best_objects[object_type] = obj
                for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[object_type], frame_time)
+                    c(self.name, self.best_objects[object_type], frame_name)
 
        for c in self.callbacks["camera_activity"]:
            c(self.name, camera_activity)
@@ -889,7 +447,7 @@ class CameraState:
                    c(self.name, obj_name, 0)
                    self.active_object_counts[obj_name] = 0
                    for c in self.callbacks["snapshot"]:
-                        c(self.name, self.best_objects[obj_name], frame_time)
+                        c(self.name, self.best_objects[obj_name], frame_name)
 
        # cleanup thumbnail frame cache
        current_thumb_frames = {
@@ -920,7 +478,7 @@ class CameraState:
        if self.previous_frame_id is not None:
            self.frame_manager.close(self.previous_frame_id)
 
-        self.previous_frame_id = frame_id
+        self.previous_frame_id = frame_name
 
 
 class TrackedObjectProcessor(threading.Thread):
@@ -960,17 +518,18 @@ class TrackedObjectProcessor(threading.Thread):
        self.zone_data = defaultdict(lambda: defaultdict(dict))
        self.active_zone_data = defaultdict(lambda: defaultdict(dict))
 
-        def start(camera, obj: TrackedObject, current_frame_time):
+        def start(camera: str, obj: TrackedObject, frame_name: str):
            self.event_sender.publish(
                (
                    EventTypeEnum.tracked_object,
                    EventStateEnum.start,
                    camera,
+                    frame_name,
                    obj.to_dict(),
                )
            )
 
-        def update(camera, obj: TrackedObject, current_frame_time):
+        def update(camera: str, obj: TrackedObject, frame_name: str):
            obj.has_snapshot = self.should_save_snapshot(camera, obj)
            obj.has_clip = self.should_retain_recording(camera, obj)
            after = obj.to_dict()
@@ -986,14 +545,15 @@ class TrackedObjectProcessor(threading.Thread):
                    EventTypeEnum.tracked_object,
                    EventStateEnum.update,
                    camera,
+                    frame_name,
                    obj.to_dict(include_thumbnail=True),
                )
            )
 
-        def autotrack(camera, obj: TrackedObject, current_frame_time):
+        def autotrack(camera: str, obj: TrackedObject, frame_name: str):
            self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
 
-        def end(camera, obj: TrackedObject, current_frame_time):
+        def end(camera: str, obj: TrackedObject, frame_name: str):
            # populate has_snapshot
            obj.has_snapshot = self.should_save_snapshot(camera, obj)
            obj.has_clip = self.should_retain_recording(camera, obj)
@@ -1048,11 +608,12 @@ class TrackedObjectProcessor(threading.Thread):
                    EventTypeEnum.tracked_object,
                    EventStateEnum.end,
                    camera,
+                    frame_name,
                    obj.to_dict(include_thumbnail=True),
                )
            )
 
-        def snapshot(camera, obj: TrackedObject, current_frame_time):
+        def snapshot(camera, obj: TrackedObject, frame_name: str):
            mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
            if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
                jpg_bytes = obj.get_jpg_bytes(
@@ -1157,7 +718,8 @@ class TrackedObjectProcessor(threading.Thread):
            )
            and (
                not review_config.detections.required_zones
-                or set(obj.entered_zones) & set(review_config.alerts.required_zones)
+                or set(obj.entered_zones)
+                & set(review_config.detections.required_zones)
            )
        )
    ):
@@ -1240,6 +802,7 @@ class TrackedObjectProcessor(threading.Thread):
        try:
            (
                camera,
+                frame_name,
                frame_time,
                current_tracked_objects,
                motion_boxes,
@@ -1251,7 +814,7 @@ class TrackedObjectProcessor(threading.Thread):
            camera_state = self.camera_states[camera]
 
            camera_state.update(
-                frame_time, current_tracked_objects, motion_boxes, regions
+                frame_name, frame_time, current_tracked_objects, motion_boxes, regions
            )
 
            self.update_mqtt_motion(camera, frame_time, motion_boxes)
@@ -1264,6 +827,7 @@ class TrackedObjectProcessor(threading.Thread):
            self.detection_publisher.publish(
                (
                    camera,
+                    frame_name,
                    frame_time,
                    tracked_objects,
                    motion_boxes,
@@ -268,12 +268,10 @@ class BirdsEyeFrameManager:
    def __init__(
        self,
        config: FrigateConfig,
-        frame_manager: SharedMemoryFrameManager,
        stop_event: mp.Event,
    ):
        self.config = config
        self.mode = config.birdseye.mode
-        self.frame_manager = frame_manager
        width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height)
        self.frame_shape = (height, width)
        self.yuv_shape = (height * 3 // 2, width)
@@ -351,18 +349,13 @@ class BirdsEyeFrameManager:
        logger.debug("Clearing the birdseye frame")
        self.frame[:] = self.blank_frame
 
-    def copy_to_position(self, position, camera=None, frame_time=None):
+    def copy_to_position(self, position, camera=None, frame: np.ndarray = None):
        if camera is None:
            frame = None
            channel_dims = None
        else:
-            frame_id = f"{camera}{frame_time}"
-            frame = self.frame_manager.get(
-                frame_id, self.config.cameras[camera].frame_shape_yuv
-            )
-
            if frame is None:
-                logger.debug(f"Unable to copy frame {camera}{frame_time} to birdseye.")
+                logger.debug(f"Unable to copy frame {camera} to birdseye.")
                return
 
            channel_dims = self.cameras[camera]["channel_dims"]
@@ -375,8 +368,6 @@ class BirdsEyeFrameManager:
                channel_dims,
            )
 
-            self.frame_manager.close(frame_id)
-
    def camera_active(self, mode, object_box_count, motion_box_count):
        if mode == BirdseyeModeEnum.continuous:
            return True
@@ -387,7 +378,7 @@ class BirdsEyeFrameManager:
        if mode == BirdseyeModeEnum.objects and object_box_count > 0:
            return True
 
-    def update_frame(self):
+    def update_frame(self, frame: np.ndarray):
        """Update to a new frame for birdseye."""
 
        # determine how many cameras are tracking objects within the last inactivity_threshold seconds
@@ -397,7 +388,7 @@ class BirdsEyeFrameManager:
                for cam, cam_data in self.cameras.items()
                if self.config.cameras[cam].birdseye.enabled
                and cam_data["last_active_frame"] > 0
-                and cam_data["current_frame"] - cam_data["last_active_frame"]
+                and cam_data["current_frame_time"] - cam_data["last_active_frame"]
                < self.inactivity_threshold
            ]
        )
@@ -414,7 +405,7 @@ class BirdsEyeFrameManager:
        limited_active_cameras = sorted(
            active_cameras,
            key=lambda active_camera: (
-                self.cameras[active_camera]["current_frame"]
+                self.cameras[active_camera]["current_frame_time"]
                - self.cameras[active_camera]["last_active_frame"]
            ),
        )
@@ -524,7 +515,9 @@ class BirdsEyeFrameManager:
        for row in self.camera_layout:
            for position in row:
                self.copy_to_position(
-                    position[1], position[0], self.cameras[position[0]]["current_frame"]
+                    position[1],
+                    position[0],
+                    self.cameras[position[0]]["current_frame"],
                )
 
        return True
@@ -672,7 +665,14 @@ class BirdsEyeFrameManager:
        else:
            return standard_candidate_layout
 
-    def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
+    def update(
+        self,
+        camera: str,
+        object_count: int,
+        motion_count: int,
+        frame_time: float,
+        frame: np.ndarray,
+    ) -> bool:
        # don't process if birdseye is disabled for this camera
        camera_config = self.config.cameras[camera].birdseye
 
@@ -689,7 +689,8 @@ class BirdsEyeFrameManager:
            return False
 
        # update the last active frame for the camera
-        self.cameras[camera]["current_frame"] = frame_time
+        self.cameras[camera]["current_frame"] = frame.copy()
+        self.cameras[camera]["current_frame_time"] = frame_time
        if self.camera_active(camera_config.mode, object_count, motion_count):
            self.cameras[camera]["last_active_frame"] = frame_time
 
@@ -700,7 +701,7 @@ class BirdsEyeFrameManager:
            return False
 
        try:
-            updated_frame = self.update_frame()
+            updated_frame = self.update_frame(frame)
        except Exception:
            updated_frame = False
            self.active_cameras = []
@@ -737,12 +738,12 @@ class Birdseye:
        self.broadcaster = BroadcastThread(
            "birdseye", self.converter, websocket_server, stop_event
        )
-        frame_manager = SharedMemoryFrameManager()
-        self.birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
+        self.birdseye_manager = BirdsEyeFrameManager(config, stop_event)
        self.config_subscriber = ConfigSubscriber("config/birdseye/")
+        self.frame_manager = SharedMemoryFrameManager()
 
        if config.birdseye.restream:
-            self.birdseye_buffer = frame_manager.create(
+            self.birdseye_buffer = self.frame_manager.create(
                "birdseye",
                self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1],
            )
@@ -756,7 +757,7 @@ class Birdseye:
        current_tracked_objects: list[dict[str, any]],
        motion_boxes: list[list[int]],
        frame_time: float,
-        frame,
+        frame: np.ndarray,
    ) -> None:
        # check if there is an updated config
        while True:
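The birdseye hunks above change the data flow: `BirdsEyeFrameManager` no longer re-fetches camera frames from shared memory by a reconstructed id; the caller passes the ndarray in, and `update` keeps a private `frame.copy()` next to a separate `current_frame_time`. Copying decouples the composited canvas from the shared-memory segment's lifetime. A small sketch of that hand-off, with an illustrative `cameras` dict:

```python
import numpy as np

cameras: dict[str, dict] = {"front": {}}


def on_frame(camera: str, frame: np.ndarray, frame_time: float) -> None:
    # copy() detaches the pixels from the shared-memory buffer, so the SHM
    # segment can be closed or reused without invalidating the stored frame
    cameras[camera]["current_frame"] = frame.copy()
    cameras[camera]["current_frame_time"] = frame_time


on_frame("front", np.zeros((480, 640), np.uint8), 1700000000.0)
print(cameras["front"]["current_frame_time"])
```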
@@ -63,6 +63,7 @@ def output_frames(
    birdseye: Optional[Birdseye] = None
    preview_recorders: dict[str, PreviewRecorder] = {}
    preview_write_times: dict[str, float] = {}
+    failed_frame_requests: dict[str, int] = {}
 
    move_preview_frames("cache")
 
@@ -87,19 +88,27 @@ def output_frames(
 
        (
            camera,
+            frame_name,
            frame_time,
            current_tracked_objects,
            motion_boxes,
-            regions,
+            _,
        ) = data
 
-        frame_id = f"{camera}{frame_time}"
-
-        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
+        frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
 
        if frame is None:
-            logger.debug(f"Failed to get frame {frame_id} from SHM")
+            logger.debug(f"Failed to get frame {frame_name} from SHM")
+            failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1
+
+            if failed_frame_requests[camera] > config.cameras[camera].detect.fps:
+                logger.warning(
+                    f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues."
+                )
+
            continue
+        else:
+            failed_frame_requests[camera] = 0
 
        # send camera frame to ffmpeg process if websockets are connected
        if any(
@@ -134,12 +143,15 @@ def output_frames(
        # check for any cameras that are currently offline
        # and need to generate a preview
        if generated_preview:
+            logger.debug(
+                "Checking for offline cameras because another camera generated a preview."
+            )
            for camera, time in preview_write_times.copy().items():
                if time != 0 and frame_time - time > 10:
                    preview_recorders[camera].flag_offline(frame_time)
                    preview_write_times[camera] = frame_time
 
-        frame_manager.close(frame_id)
+        frame_manager.close(frame_name)
 
    move_preview_frames("clips")
 
@@ -151,15 +163,15 @@ def output_frames(
 
        (
            camera,
+            frame_name,
            frame_time,
            current_tracked_objects,
            motion_boxes,
            regions,
        ) = data
 
-        frame_id = f"{camera}{frame_time}"
-        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
-        frame_manager.close(frame_id)
+        frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
+        frame_manager.close(frame_name)
 
    detection_subscriber.stop()
 
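`failed_frame_requests` above turns isolated SHM misses, which are normal under brief contention, into a warning only after more consecutive misses than roughly one second's worth of frames (`detect.fps`), and a single successful read resets the count. A minimal sketch of that debounce, with illustrative names:

```python
import logging

logger = logging.getLogger("output")
failed_frame_requests: dict[str, int] = {}


def note_frame_result(camera: str, frame, detect_fps: int) -> bool:
    """Return True when a frame was available; warn after sustained misses."""
    if frame is None:
        failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1
        if failed_frame_requests[camera] > detect_fps:
            logger.warning(
                "Failed to retrieve many frames for %s from SHM, "
                "consider increasing SHM size if this continues.",
                camera,
            )
        return False
    # any successful read clears the streak
    failed_frame_requests[camera] = 0
    return True
```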
@@ -78,7 +78,7 @@ class FFMpegConverter(threading.Thread):
        # write a PREVIEW at fps and 1 key frame per clip
        self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
            config.ffmpeg.ffmpeg_path,
-            config.ffmpeg.hwaccel_args,
+            "default",
            input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin",
            output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
            type=EncodeTypeEnum.preview,
@@ -154,6 +154,7 @@ class PreviewRecorder:
        self.start_time = 0
        self.last_output_time = 0
        self.output_frames = []
+
        if config.detect.width > config.detect.height:
            self.out_height = PREVIEW_HEIGHT
            self.out_width = (
@@ -274,7 +275,7 @@ class PreviewRecorder:
 
        return False
 
-    def write_frame_to_cache(self, frame_time: float, frame) -> None:
+    def write_frame_to_cache(self, frame_time: float, frame: np.ndarray) -> None:
        # resize yuv frame
        small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8)
        copy_yuv_to_position(
@@ -303,7 +304,7 @@ class PreviewRecorder:
        current_tracked_objects: list[dict[str, any]],
        motion_boxes: list[list[int]],
        frame_time: float,
-        frame,
+        frame: np.ndarray,
    ) -> bool:
        # check for updated record config
        _, updated_record_config = self.config_subscriber.check_for_update()
@@ -332,6 +333,10 @@ class PreviewRecorder:
                self.output_frames,
                self.requestor,
            ).start()
+        else:
+            logger.debug(
+                f"Not saving preview for {self.config.name} because there are no saved frames."
+            )
 
        # reset frame cache
        self.segment_end = (
@@ -32,6 +32,7 @@ from frigate.const import (
|
|||||||
CONFIG_DIR,
|
CONFIG_DIR,
|
||||||
)
|
)
|
||||||
from frigate.ptz.onvif import OnvifController
|
from frigate.ptz.onvif import OnvifController
|
||||||
|
from frigate.track.tracked_object import TrackedObject
|
||||||
from frigate.util.builtin import update_yaml_file
|
from frigate.util.builtin import update_yaml_file
|
||||||
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union
|
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union
|
||||||
|
|
||||||
@@ -58,7 +59,13 @@ class PtzMotionEstimator:
|
|||||||
self.ptz_metrics.reset.set()
|
self.ptz_metrics.reset.set()
|
||||||
logger.debug(f"{config.name}: Motion estimator init")
|
logger.debug(f"{config.name}: Motion estimator init")
|
||||||
|
|
||||||
def motion_estimator(self, detections, frame_time, camera):
|
def motion_estimator(
|
||||||
|
self,
|
||||||
|
detections: list[dict[str, any]],
|
||||||
|
frame_name: str,
|
||||||
|
frame_time: float,
|
||||||
|
camera: str,
|
||||||
|
):
|
||||||
# If we've just started up or returned to our preset, reset motion estimator for new tracking session
|
# If we've just started up or returned to our preset, reset motion estimator for new tracking session
|
||||||
if self.ptz_metrics.reset.is_set():
|
if self.ptz_metrics.reset.is_set():
|
||||||
self.ptz_metrics.reset.clear()
|
self.ptz_metrics.reset.clear()
|
||||||
@@ -91,9 +98,8 @@ class PtzMotionEstimator:
|
|||||||
f"{camera}: Motion estimator running - frame time: {frame_time}"
|
f"{camera}: Motion estimator running - frame time: {frame_time}"
|
||||||
)
|
)
|
||||||
|
|
||||||
frame_id = f"{camera}{frame_time}"
|
|
||||||
yuv_frame = self.frame_manager.get(
|
yuv_frame = self.frame_manager.get(
|
||||||
frame_id, self.camera_config.frame_shape_yuv
|
frame_name, self.camera_config.frame_shape_yuv
|
||||||
)
|
)
|
||||||
|
|
||||||
if yuv_frame is None:
|
if yuv_frame is None:
|
||||||
@@ -135,7 +141,7 @@ class PtzMotionEstimator:
|
|||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
self.frame_manager.close(frame_id)
|
self.frame_manager.close(frame_name)
|
||||||
|
|
||||||
return self.coord_transformations
|
return self.coord_transformations
|
||||||
|
|
||||||
@@ -214,7 +220,7 @@ class PtzAutoTracker:
             ):
                 self._autotracker_setup(camera_config, camera)
 
-    def _autotracker_setup(self, camera_config, camera):
+    def _autotracker_setup(self, camera_config: CameraConfig, camera: str):
         logger.debug(f"{camera}: Autotracker init")
 
         self.object_types[camera] = camera_config.onvif.autotracking.track

@@ -852,7 +858,7 @@ class PtzAutoTracker:
             logger.debug(f"{camera}: Valid velocity ")
             return True, velocities.flatten()
 
-    def _get_distance_threshold(self, camera, obj):
+    def _get_distance_threshold(self, camera: str, obj: TrackedObject):
         # Returns true if Euclidean distance from object to center of frame is
         # less than 10% of the of the larger dimension (width or height) of the frame,
         # multiplied by a scaling factor for object size.

@@ -888,7 +894,9 @@ class PtzAutoTracker:
 
         return distance_threshold
 
-    def _should_zoom_in(self, camera, obj, box, predicted_time, debug_zooming=False):
+    def _should_zoom_in(
+        self, camera: str, obj: TrackedObject, box, predicted_time, debug_zooming=False
+    ):
         # returns True if we should zoom in, False if we should zoom out, None to do nothing
         camera_config = self.config.cameras[camera]
         camera_width = camera_config.frame_shape[1]

@@ -1019,7 +1027,7 @@ class PtzAutoTracker:
             # Don't zoom at all
             return None
 
-    def _autotrack_move_ptz(self, camera, obj):
+    def _autotrack_move_ptz(self, camera: str, obj: TrackedObject):
         camera_config = self.config.cameras[camera]
         camera_width = camera_config.frame_shape[1]
         camera_height = camera_config.frame_shape[0]

@@ -1090,7 +1098,12 @@ class PtzAutoTracker:
             self._enqueue_move(camera, obj.obj_data["frame_time"], 0, 0, zoom)
 
     def _get_zoom_amount(
-        self, camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
+        self,
+        camera: str,
+        obj: TrackedObject,
+        predicted_box,
+        predicted_movement_time,
+        debug_zoom=True,
     ):
         camera_config = self.config.cameras[camera]
 

@@ -1186,13 +1199,13 @@ class PtzAutoTracker:
 
         return zoom
 
-    def is_autotracking(self, camera):
+    def is_autotracking(self, camera: str):
         return self.tracked_object[camera] is not None
 
-    def autotracked_object_region(self, camera):
+    def autotracked_object_region(self, camera: str):
         return self.tracked_object[camera]["region"]
 
-    def autotrack_object(self, camera, obj):
+    def autotrack_object(self, camera: str, obj: TrackedObject):
         camera_config = self.config.cameras[camera]
 
         if camera_config.onvif.autotracking.enabled:

@@ -1208,7 +1221,7 @@ class PtzAutoTracker:
             if (
                 # new object
                 self.tracked_object[camera] is None
-                and obj.camera == camera
+                and obj.camera_config.name == camera
                 and obj.obj_data["label"] in self.object_types[camera]
                 and set(obj.entered_zones) & set(self.required_zones[camera])
                 and not obj.previous["false_positive"]

@@ -1267,7 +1280,7 @@ class PtzAutoTracker:
                 # If it's within bounds, start tracking that object.
                 # Should we check region (maybe too broad) or expand the previous object's box a bit and check that?
                 self.tracked_object[camera] is None
-                and obj.camera == camera
+                and obj.camera_config.name == camera
                 and obj.obj_data["label"] in self.object_types[camera]
                 and not obj.previous["false_positive"]
                 and not obj.false_positive

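Review note: both condition hunks swap obj.camera for obj.camera_config.name, presumably because TrackedObject now carries its CameraConfig rather than a bare camera string. A self-contained sketch of the eligibility test these blocks implement; every name here is an illustrative stand-in, not frigate's API:

from dataclasses import dataclass, field


@dataclass
class FakeObj:
    camera_name: str
    label: str
    entered_zones: list[str] = field(default_factory=list)
    false_positive: bool = False


def eligible(obj: FakeObj, camera: str, track: set[str], required: set[str]) -> bool:
    # mirrors the diff's condition: right camera, tracked label,
    # overlaps a required zone, and not a false positive
    return (
        obj.camera_name == camera
        and obj.label in track
        and bool(set(obj.entered_zones) & required)
        and not obj.false_positive
    )


print(eligible(FakeObj("front_door", "person", ["porch"]), "front_door", {"person"}, {"porch"}))  # True
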
@@ -27,6 +27,7 @@ from frigate.ffmpeg_presets import (
     parse_preset_hardware_acceleration_encode,
 )
 from frigate.models import Export, Previews, Recordings
+from frigate.util.builtin import is_current_hour
 
 logger = logging.getLogger(__name__)
 

@@ -43,6 +44,11 @@ class PlaybackFactorEnum(str, Enum):
     timelapse_25x = "timelapse_25x"
 
 
+class PlaybackSourceEnum(str, Enum):
+    recordings = "recordings"
+    preview = "preview"
+
+
 class RecordingExporter(threading.Thread):
     """Exports a specific set of recordings for a camera to storage as a single file."""
 

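Review note: like PlaybackFactorEnum above it, the new PlaybackSourceEnum subclasses both str and Enum, so values can be parsed straight from request parameters and serialize back as plain strings. A small sketch of that round-trip:

from enum import Enum


class PlaybackSourceEnum(str, Enum):
    recordings = "recordings"
    preview = "preview"


# str-backed enums compare equal to their string value and construct from it,
# which is presumably why the diff uses (str, Enum) here.
source = PlaybackSourceEnum("preview")
print(source == "preview")  # True
print(source.value)         # "preview"
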
@@ -56,6 +62,7 @@ class RecordingExporter(threading.Thread):
         start_time: int,
         end_time: int,
         playback_factor: PlaybackFactorEnum,
+        playback_source: PlaybackSourceEnum,
     ) -> None:
         super().__init__()
         self.config = config

@@ -66,6 +73,7 @@ class RecordingExporter(threading.Thread):
         self.start_time = start_time
         self.end_time = end_time
         self.playback_factor = playback_factor
+        self.playback_source = playback_source
 
         # ensure export thumb dir
         Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)

@@ -170,30 +178,7 @@ class RecordingExporter(threading.Thread):
 
         return thumb_path
 
-    def run(self) -> None:
-        logger.debug(
-            f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
-        )
-        export_name = (
-            self.user_provided_name
-            or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
-        )
-        video_path = f"{EXPORT_DIR}/{self.export_id}.mp4"
-
-        thumb_path = self.save_thumbnail(self.export_id)
-
-        Export.insert(
-            {
-                Export.id: self.export_id,
-                Export.camera: self.camera,
-                Export.name: export_name,
-                Export.date: self.start_time,
-                Export.video_path: video_path,
-                Export.thumb_path: thumb_path,
-                Export.in_progress: True,
-            }
-        ).execute()
-
+    def get_record_export_command(self, video_path: str) -> list[str]:
         if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS:
             playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8"
             ffmpeg_input = (

@@ -204,7 +189,10 @@ class RecordingExporter(threading.Thread):
 
             # get full set of recordings
             export_recordings = (
-                Recordings.select()
+                Recordings.select(
+                    Recordings.start_time,
+                    Recordings.end_time,
+                )
                 .where(
                     Recordings.start_time.between(self.start_time, self.end_time)
                     | Recordings.end_time.between(self.start_time, self.end_time)

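Review note: narrowing Recordings.select() to only the start_time and end_time columns keeps each fetched row small, and the query's .namedtuples().iterator() tail streams rows without full model instantiation. A self-contained peewee sketch of the same pattern; the in-memory schema and values are made up for illustration:

from peewee import CharField, FloatField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Recordings(Model):
    camera = CharField()
    start_time = FloatField()
    end_time = FloatField()

    class Meta:
        database = db


db.create_tables([Recordings])
Recordings.insert(camera="front_door", start_time=1.0, end_time=11.0).execute()

# select only the columns the loop reads; iterate as lightweight namedtuples
for rec in (
    Recordings.select(Recordings.start_time, Recordings.end_time)
    .where(Recordings.camera == "front_door")
    .order_by(Recordings.start_time.asc())
    .namedtuples()
    .iterator()
):
    print(rec.start_time, rec.end_time)
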
@@ -233,6 +221,91 @@ class RecordingExporter(threading.Thread):
             ffmpeg_cmd = (
                 f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
             ).split(" ")
+        elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
+            ffmpeg_cmd = (
+                parse_preset_hardware_acceleration_encode(
+                    self.config.ffmpeg.ffmpeg_path,
+                    self.config.ffmpeg.hwaccel_args,
+                    f"-an {ffmpeg_input}",
+                    f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
+                    EncodeTypeEnum.timelapse,
+                )
+            ).split(" ")
+
+        return ffmpeg_cmd, playlist_lines
+
+    def get_preview_export_command(self, video_path: str) -> list[str]:
+        playlist_lines = []
+        codec = "-c copy"
+
+        if is_current_hour(self.start_time):
+            # get list of current preview frames
+            preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+            file_start = f"preview_{self.camera}"
+            start_file = f"{file_start}-{self.start_time}.{PREVIEW_FRAME_TYPE}"
+            end_file = f"{file_start}-{self.end_time}.{PREVIEW_FRAME_TYPE}"
+
+            for file in sorted(os.listdir(preview_dir)):
+                if not file.startswith(file_start):
+                    continue
+
+                if file < start_file:
+                    continue
+
+                if file > end_file:
+                    break
+
+                playlist_lines.append(f"file '{os.path.join(preview_dir, file)}'")
+                playlist_lines.append("duration 0.12")
+
+            if playlist_lines:
+                last_file = playlist_lines[-2]
+                playlist_lines.append(last_file)
+                codec = "-c:v libx264"
+
+        # get full set of previews
+        export_previews = (
+            Previews.select(
+                Previews.path,
+                Previews.start_time,
+                Previews.end_time,
+            )
+            .where(
+                Previews.start_time.between(self.start_time, self.end_time)
+                | Previews.end_time.between(self.start_time, self.end_time)
+                | (
+                    (self.start_time > Previews.start_time)
+                    & (self.end_time < Previews.end_time)
+                )
+            )
+            .where(Previews.camera == self.camera)
+            .order_by(Previews.start_time.asc())
+            .namedtuples()
+            .iterator()
+        )
+
+        preview: Previews
+        for preview in export_previews:
+            playlist_lines.append(f"file '{preview.path}'")
+
+            if preview.start_time < self.start_time:
+                playlist_lines.append(
+                    f"inpoint {int(self.start_time - preview.start_time)}"
+                )
+
+            if preview.end_time > self.end_time:
+                playlist_lines.append(
+                    f"outpoint {int(preview.end_time - self.end_time)}"
+                )
+
+        ffmpeg_input = (
+            "-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin"
+        )
+
+        if self.playback_factor == PlaybackFactorEnum.realtime:
+            ffmpeg_cmd = (
+                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}"
+            ).split(" ")
         elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
             ffmpeg_cmd = (
                 parse_preset_hardware_acceleration_encode(

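Review note: get_preview_export_command builds input for ffmpeg's concat demuxer and pipes it to -i /dev/stdin, hence the -protocol_whitelist pipe,file,tcp and -safe 0 flags. The "duration 0.12" entries play cached preview frames at roughly 8 fps, inpoint/outpoint trim the first and last preview files, and re-appending the last frame entry presumably works around the known concat-demuxer quirk where the final entry's duration directive is otherwise ignored. An illustrative playlist in that syntax; paths and offsets are invented:

# Illustrative only: the concat-demuxer playlist text that gets piped to
# ffmpeg's stdin. File names, durations, and offsets are made up.
playlist_lines = [
    # current-hour branch: individual cached frames, ~8 fps
    "file '/tmp/cache/preview_frames/preview_front_door-1718000000.0.webp'",
    "duration 0.12",
    "file '/tmp/cache/preview_frames/preview_front_door-1718000008.0.webp'",
    "duration 0.12",
    "file '/tmp/cache/preview_frames/preview_front_door-1718000008.0.webp'",  # last entry repeated
    # database branch: whole preview files, trimmed to the export window
    "file '/media/frigate/clips/previews/front_door/1718003600.0.mp4'",
    "inpoint 42",   # start 42s into this preview file
]
print("\n".join(playlist_lines))
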
@@ -244,6 +317,36 @@ class RecordingExporter(threading.Thread):
                 )
             ).split(" ")
 
+        return ffmpeg_cmd, playlist_lines
+
+    def run(self) -> None:
+        logger.debug(
+            f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
+        )
+        export_name = (
+            self.user_provided_name
+            or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
+        )
+        video_path = f"{EXPORT_DIR}/{self.export_id}.mp4"
+        thumb_path = self.save_thumbnail(self.export_id)
+
+        Export.insert(
+            {
+                Export.id: self.export_id,
+                Export.camera: self.camera,
+                Export.name: export_name,
+                Export.date: self.start_time,
+                Export.video_path: video_path,
+                Export.thumb_path: thumb_path,
+                Export.in_progress: True,
+            }
+        ).execute()
+
+        if self.playback_source == PlaybackSourceEnum.recordings:
+            ffmpeg_cmd, playlist_lines = self.get_record_export_command(video_path)
+        else:
+            ffmpeg_cmd, playlist_lines = self.get_preview_export_command(video_path)
+
         p = sp.run(
             ffmpeg_cmd,
             input="\n".join(playlist_lines),

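Review note: run() now only records the export metadata and defers to one of the two command builders based on playback_source; the playlist is handed to ffmpeg over stdin, so no temporary playlist file is needed. A hedged sketch of that subprocess pattern; the command and playlist here are stand-ins, not the values the builders above produce:

import shutil
import subprocess as sp

# stand-in command and playlist; the real values come from the command builders
ffmpeg_cmd = [
    "ffmpeg", "-hide_banner", "-y",
    "-protocol_whitelist", "pipe,file,tcp",
    "-f", "concat", "-safe", "0", "-i", "/dev/stdin",
    "-c", "copy", "/tmp/export.mp4",
]
playlist_lines = ["file '/tmp/a.mp4'", "file '/tmp/b.mp4'"]

if shutil.which("ffmpeg"):  # guard so the sketch degrades gracefully
    p = sp.run(
        ffmpeg_cmd,
        input="\n".join(playlist_lines),  # playlist arrives on stdin
        encoding="ascii",
        capture_output=True,
    )
    if p.returncode != 0:
        print(p.stderr)
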
@@ -254,7 +357,7 @@ class RecordingExporter(threading.Thread):
 
         if p.returncode != 0:
             logger.error(
-                f"Failed to export recording for command {' '.join(ffmpeg_cmd)}"
+                f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}"
             )
             logger.error(p.stderr)
             Path(video_path).unlink(missing_ok=True)

@@ -142,6 +142,8 @@ class RecordingMaintainer(threading.Thread):
                     )
                 )
             )
+
+            # see if the recording mover is too slow and segments need to be deleted
             if processed_segment_count > keep_count:
                 logger.warning(
                     f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {processed_segment_count} and discarding the rest..."

@@ -153,6 +155,21 @@ class RecordingMaintainer(threading.Thread):
                 self.end_time_cache.pop(cache_path, None)
             grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
 
+            # see if detection has failed and unprocessed segments need to be deleted
+            unprocessed_segment_count = (
+                len(grouped_recordings[camera]) - processed_segment_count
+            )
+            if unprocessed_segment_count > keep_count:
+                logger.warning(
+                    f"Too many unprocessed recording segments in cache for {camera}. This likely indicates an issue with the detect stream, keeping the {keep_count} most recent segments out of {unprocessed_segment_count} and discarding the rest..."
+                )
+                to_remove = grouped_recordings[camera][:-keep_count]
+                for rec in to_remove:
+                    cache_path = rec["cache_path"]
+                    Path(cache_path).unlink(missing_ok=True)
+                    self.end_time_cache.pop(cache_path, None)
+                grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
+
         tasks = []
         for camera, recordings in grouped_recordings.items():
             # clear out all the object recording info for old frames

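Review note: the added block applies the same keep-the-newest pruning rule to unprocessed segments that the hunk above already applies to processed ones. A self-contained sketch of that slicing; segment names are invented:

# when the backlog exceeds keep_count, everything except the keep_count
# most recent entries (the list is ordered oldest -> newest) is discarded
segments = ["seg-%02d.ts" % i for i in range(10)]
keep_count = 3

to_remove = segments[:-keep_count]  # all but the newest keep_count
kept = segments[-keep_count:]

print(to_remove)  # seg-00 .. seg-06
print(kept)       # seg-07, seg-08, seg-09
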
@@ -282,16 +299,12 @@ class RecordingMaintainer(threading.Thread):
                 # if it doesn't overlap with an event, go ahead and drop the segment
                 # if it ends more than the configured pre_capture for the camera
                 else:
-                    pre_capture = max(
-                        record_config.alerts.pre_capture,
-                        record_config.detections.pre_capture,
-                    )
                     camera_info = self.object_recordings_info[camera]
                     most_recently_processed_frame_time = (
                         camera_info[-1][0] if len(camera_info) > 0 else 0
                     )
                     retain_cutoff = datetime.datetime.fromtimestamp(
-                        most_recently_processed_frame_time - pre_capture
+                        most_recently_processed_frame_time - record_config.event_pre_capture
                     ).astimezone(datetime.timezone.utc)
                     if end_time < retain_cutoff:
                         Path(cache_path).unlink(missing_ok=True)

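Review note: the retention check now reads the pre-capture window straight from record_config.event_pre_capture instead of taking the max of the alert and detection pre_capture values. A worked sketch of the cutoff arithmetic; the timestamps are invented:

import datetime

most_recent_frame_time = 1_718_000_000.0  # epoch seconds of last processed frame
event_pre_capture = 5                     # seconds of pre-capture to retain

# a segment is dropped when it ends before (last processed frame - pre-capture)
retain_cutoff = datetime.datetime.fromtimestamp(
    most_recent_frame_time - event_pre_capture
).astimezone(datetime.timezone.utc)

segment_end = datetime.datetime.fromtimestamp(1_717_999_990.0).astimezone(
    datetime.timezone.utc
)
print(segment_end < retain_cutoff)  # True: ends before the cutoff, would be unlinked
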
@@ -501,6 +514,7 @@ class RecordingMaintainer(threading.Thread):
                 if topic == DetectionTypeEnum.video:
                     (
                         camera,
+                        _,
                         frame_time,
                         current_tracked_objects,
                         motion_boxes,

Some files were not shown because too many files have changed in this diff.