Compare commits
v0.9.0-rc3...v0.10.0-be
140 Commits
Commit SHA1s:

e6ec5cb097 23c70acd51 091648187f 2b7d38f947 f801930588 955c2779d9 037f8667a6
307068a61f 077d900b44 92f9195075 82c60093d1 944b9181e0 326b368e82 040d8c9778
273f803c7c bd8e23833c 9edf38347c 1569ce7cf6 db1255aa7f 609b436ed8 95bdf9fe34
251d29aa38 156e1a4dc2 a5c13e7455 fcb4aaef0d 589432bc89 b19a02888a 18fd50dfce
df0246aed8 cbb2882123 9f18629df3 63f8034e46 f3efc0667f af001321a8 92e08b92f5
26241b0877 c1155af169 77c1f1bb1b ae3c01fe2d 7a2a85d253 77c66d4e49 494e5ac4ec
63b7465452 e6d2df5661 a3301e0347 3d556cc2cb 585efe1a0f c7d47439dd 19a6978228
1ebb8a54bf ae968044d6 b912851e49 14c74e4361 51fb532e1a 3541f966e3 c7faef8faa
cdd3000315 1c1c28d0e5 4422e86907 8f43a2d109 bd7755fdd3 d554175631 ff667b019a
57dcb29f8b 9dc6c423b7 58117e2a3e 5bec438f9c 24cc63d6d3 d17bd74c9a 8f101ccca8
b63c56d810 61c62d4685 26ae6084ea 76142e9699 5e692acfbb a67b8ab84d 4cf55ad8e2
c1132e6897 d6104f2eb2 b0e0abe385 4916e1cd1d cd87f3e6f4 18f4ab2644 0bd3be94ec
25bb515afc 7ab6961ee1 ae24cf3bb2 2e494477a6 80b72c75d9 9494bb7f5f 86a741b6e6
f738275d21 e297e02800 b2e05afff2 05fc35fc3d c809494c98 ef82c5c691 c0e2a75715
01ddd00bc5 d150f01a2c f9e159deaf 381b00157e 800f33e7be b8218876be 5669f4c161
c492b30adb eb48722126 8e881b60f0 0260d824a6 0877a7dec7 4c7919ad69 4e997124b3
8b040f5c95 96156805ed b8d48d7e62 8ca12806ca de811b7018 7bf7365f6c 1daffd92fd
74986982a0 aa807d25ed cd28869649 ae97692883 e8e778c6d4 5c552a0d71 0f5dfea9de
e6cdb6a7a2 1d25936f31 1049673413 c3109f808c a943ac1308 96319e795c 5a8016de87
bc350644bd c793500ad2 1b2134c49e 86a5b46c68 f83d4a58dd a5f241d5bd 661f7baa21
.dockerignore

@@ -7,4 +7,5 @@ config/
 .git
 core
 *.mp4
 *.db
+*.ts
56 .github/ISSUE_TEMPLATE/bug_report.md (vendored)

@@ -1,56 +0,0 @@
----
-name: Bug report or Support request
-about: Bug report or Support request
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what your issue is.
-
-**Version of frigate**
-Output from `/api/version`
-
-**Config file**
-Include your full config file wrapped in triple back ticks.
-```yaml
-config here
-```
-
-**Frigate container logs**
-```
-Include relevant log output here
-```
-
-**Frigate stats**
-```json
-Output from frigate's /api/stats endpoint
-```
-
-**FFprobe from your camera**
-
-Run the following command and paste output below
-```
-ffprobe <stream_url>
-```
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Computer Hardware**
-- OS: [e.g. Ubuntu, Windows]
-- Install method: [e.g. Addon, Docker Compose, Docker Command]
-- Virtualization: [e.g. Proxmox, Virtualbox]
-- Coral Version: [e.g. USB, PCIe, None]
-- Network Setup: [e.g. Wired, WiFi]
-
-**Camera Info:**
-- Manufacturer: [e.g. Dahua]
-- Model: [e.g. IPC-HDW5231R-ZE]
-- Resolution: [e.g. 720p]
-- FPS: [e.g. 5]
-
-**Additional context**
-Add any other context about the problem here.
1 .github/ISSUE_TEMPLATE/config.yml (vendored, new file)

@@ -0,0 +1 @@
+blank_issues_enabled: false
107 .github/ISSUE_TEMPLATE/support_request.yml (vendored, new file)

@@ -0,0 +1,107 @@
+name: Support Request
+description: Support for Frigate setup or configuration
+title: "[Support]: "
+labels: ["support", "triage"]
+assignees: []
+body:
+  - type: textarea
+    id: description
+    attributes:
+      label: Describe the problem you are having
+    validations:
+      required: true
+  - type: input
+    id: version
+    attributes:
+      label: Version
+      description: Visible on the Debug page in the Web UI
+    validations:
+      required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Frigate config file
+      description: This will be automatically formatted into code, so no need for backticks.
+      render: yaml
+    validations:
+      required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: ffprobe
+    attributes:
+      label: FFprobe output from your camera
+      description: Run `ffprobe <camera_url>` and provide output below
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: stats
+    attributes:
+      label: Frigate stats
+      description: Output from frigate's /api/stats endpoint
+      render: json
+  - type: dropdown
+    id: os
+    attributes:
+      label: Operating system
+      options:
+        - HassOS
+        - Debian
+        - Other Linux
+        - Proxmox
+        - UNRAID
+        - Windows
+        - Other
+    validations:
+      required: true
+  - type: dropdown
+    id: install-method
+    attributes:
+      label: Install method
+      options:
+        - HassOS Addon
+        - Docker Compose
+        - Docker CLI
+    validations:
+      required: true
+  - type: dropdown
+    id: coral
+    attributes:
+      label: Coral version
+      options:
+        - USB
+        - PCIe
+        - M.2
+        - Dev Board
+        - Other
+        - CPU (no coral)
+    validations:
+      required: true
+  - type: dropdown
+    id: network
+    attributes:
+      label: Network connection
+      options:
+        - Wired
+        - Wireless
+        - Mixed
+    validations:
+      required: true
+  - type: input
+    id: camera
+    attributes:
+      label: Camera make and model
+      description: Dahua, hikvision, amcrest, reolink, etc and model number
+    validations:
+      required: true
+  - type: textarea
+    id: other
+    attributes:
+      label: Any other information that may be helpful
24 .github/workflows/pull_request.yml (vendored)

@@ -44,3 +44,27 @@ jobs:
       - name: Test
        run: npm run test
        working-directory: ./web
+
+  docker_tests_on_aarch64:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v2
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Build and run tests
+        run: make run_tests PLATFORM="linux/arm64/v8" ARCH="aarch64"
+
+  docker_tests_on_amd64:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v2
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Build and run tests
+        run: make run_tests PLATFORM="linux/amd64" ARCH="amd64"
28 .github/workflows/push.yml (vendored)

@@ -1,28 +0,0 @@
-name: On push
-
-on:
-  push:
-    branches:
-      - master
-      - release-0.8.0
-
-jobs:
-  deploy-docs:
-    name: Deploy docs
-    runs-on: ubuntu-latest
-    defaults:
-      run:
-        working-directory: ./docs
-    steps:
-      - uses: actions/checkout@master
-      - uses: actions/setup-node@master
-        with:
-          node-version: 12.x
-      - run: npm install
-      - name: Build docs
-        run: npm run build
-      - name: Deploy documentation
-        uses: peaceiris/actions-gh-pages@v3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./docs/build
2 .gitignore (vendored)

@@ -6,7 +6,9 @@ debug
 config/config.yml
 models
 *.mp4
+*.ts
 *.db
+*.csv
 frigate/version.py
 web/build
 web/node_modules
18 Makefile

@@ -3,7 +3,7 @@ default_target: amd64_frigate
 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)

 version:
-    echo "VERSION='0.9.0-$(COMMIT_HASH)'" > frigate/version.py
+    echo "VERSION='0.10.0-$(COMMIT_HASH)'" > frigate/version.py

 web:
     docker build --tag frigate-web --file docker/Dockerfile.web web/

@@ -39,7 +39,7 @@ aarch64_wheels:
     docker build --tag blakeblackshear/frigate-wheels:1.0.3-aarch64 --file docker/Dockerfile.wheels .

 aarch64_ffmpeg:
-    docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 .
+    docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.3.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 .

 aarch64_frigate: version web
     docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base .

@@ -59,4 +59,16 @@ armv7_frigate: version web

 armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate

-.PHONY: web
+run_tests:
+    # PLATFORM: linux/arm64/v8 linux/amd64 or linux/arm/v7
+    # ARCH: aarch64 amd64 or armv7
+    @cat docker/Dockerfile.base docker/Dockerfile.$(ARCH) > docker/Dockerfile.test
+    @sed -i "s/FROM frigate-web as web/#/g" docker/Dockerfile.test
+    @sed -i "s/COPY --from=web \/opt\/frigate\/build web\//#/g" docker/Dockerfile.test
+    @sed -i "s/FROM frigate-base/#/g" docker/Dockerfile.test
+    @echo "" >> docker/Dockerfile.test
+    @echo "RUN python3 -m unittest" >> docker/Dockerfile.test
+    @docker buildx build --platform=$(PLATFORM) --tag frigate-base --build-arg NGINX_VERSION=1.0.2 --build-arg FFMPEG_VERSION=1.0.0 --build-arg ARCH=$(ARCH) --build-arg WHEELS_VERSION=1.0.3 --file docker/Dockerfile.test .
+    @rm docker/Dockerfile.test
+
+.PHONY: web run_tests
README.md

@@ -20,7 +20,7 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
 
 ## Documentation
 
-View the documentation at https://blakeblackshear.github.io/frigate
+View the documentation at https://docs.frigate.video
 
 ## Donations
 
@@ -9,7 +9,7 @@ WORKDIR /tmp/workdir
 
 ENV DEBIAN_FRONTEND=noninteractive
 RUN apt-get -yqq update && \
-  apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
+  apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 xutils-dev && \
   apt-get autoremove -y && \
   apt-get clean -y

@@ -18,7 +18,7 @@ FROM base as build
 ENV FFMPEG_VERSION=4.3.2 \
   AOM_VERSION=v1.0.0 \
   FDKAAC_VERSION=0.1.5 \
-  FREETYPE_VERSION=2.5.5 \
+  FREETYPE_VERSION=2.11.0 \
   FRIBIDI_VERSION=0.19.7 \
   KVAZAAR_VERSION=1.2.0 \
   LAME_VERSION=3.100 \

@@ -43,7 +43,7 @@ ENV FFMPEG_VERSION=4.3.2 \
   LIBZMQ_VERSION=4.3.2 \
   SRC=/usr/local
 
-ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
+ARG FREETYPE_SHA256SUM="a45c6b403413abd5706f3582f04c8339d26397c4304b78fa552f2215df64101f freetype-2.11.0.tar.gz"
 ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
 ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
 ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"

@@ -392,6 +392,16 @@ RUN \
   make -j $(nproc) install && \
   rm -rf ${DIR}
 
+
+RUN \
+  DIR=/tmp/rkmpp && \
+  mkdir -p ${DIR} && \
+  cd ${DIR} && \
+  git clone https://github.com/rockchip-linux/libdrm-rockchip && git clone https://github.com/rockchip-linux/mpp && \
+  cd libdrm-rockchip && bash autogen.sh && ./configure && make && make install && \
+  cd ../mpp && cmake -DRKPLATFORM=ON -DHAVE_DRM=ON && make -j6 && make install && \
+  rm -rf ${DIR}
+
 ## ffmpeg https://ffmpeg.org/
 RUN \
   DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \

@@ -434,6 +444,8 @@ RUN \
   --enable-libkvazaar \
   --enable-libaom \
   --extra-libs=-lpthread \
+  --enable-rkmpp \
+  --enable-libdrm \
   # --enable-omx \
   # --enable-omx-rpi \
   # --enable-mmal \
@@ -1,4 +1,5 @@
 daemon off;
+user root;
 worker_processes 1;
 
 error_log /usr/local/nginx/logs/error.log warn;

@@ -57,7 +58,7 @@ http {
 
     # vod caches
     vod_metadata_cache metadata_cache 512m;
-    vod_mapping_cache mapping_cache 5m;
+    vod_mapping_cache mapping_cache 5m 10m;
 
     # gzip manifests
     gzip on;
@@ -48,3 +48,17 @@ This may need to be in a custom location if network storage is used for the medi
 If using a custom model, the width and height will need to be specified.
 
+The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model.
+
+```yaml
+model:
+  labelmap:
+    2: vehicle
+    3: vehicle
+    5: vehicle
+    7: vehicle
+    15: animal
+    16: animal
+    17: animal
+```
+
+Note that if you rename objects in the labelmap, you will also need to update your `objects -> track` list as well.
@@ -8,19 +8,7 @@ title: Camera Specific Configurations
 The input and output parameters need to be adjusted for MJPEG cameras
 
 ```yaml
-input_args:
-  - -avoid_negative_ts
-  - make_zero
-  - -fflags
-  - nobuffer
-  - -flags
-  - low_delay
-  - -strict
-  - experimental
-  - -fflags
-  - +genpts+discardcorrupt
-  - -use_wallclock_as_timestamps
-  - "1"
+input_args: -avoid_negative_ts make_zero -fflags nobuffer -flags low_delay -strict experimental -fflags +genpts+discardcorrupt -use_wallclock_as_timestamps 1
 ```
 
 Note that mjpeg cameras require encoding the video into h264 for the recording and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly.

@@ -31,50 +19,69 @@
 output_args:
   rtmp: -c:v libx264 -an -f flv
 ```
 
-### RTMP Cameras (Reolink 410/520 and possibly others)
+### RTMP Cameras
 
 The input parameters need to be adjusted for RTMP cameras
 
 ```yaml
 ffmpeg:
-  input_args:
-    - -avoid_negative_ts
-    - make_zero
-    - -fflags
-    - nobuffer
-    - -flags
-    - low_delay
-    - -strict
-    - experimental
-    - -fflags
-    - +genpts+discardcorrupt
-    - -rw_timeout
-    - "5000000"
-    - -use_wallclock_as_timestamps
-    - "1"
-    - -f
-    - live_flv
+  input_args: -avoid_negative_ts make_zero -fflags nobuffer -flags low_delay -strict experimental -fflags +genpts+discardcorrupt -rw_timeout 5000000 -use_wallclock_as_timestamps 1 -f live_flv
 ```
 
+### Reolink 410/520 (possibly others)
+
+According to [this discussion](https://github.com/blakeblackshear/frigate/issues/1713#issuecomment-932976305), the http video streams seem to be the most reliable for Reolink.
+
+```yaml
+cameras:
+  reolink:
+    ffmpeg:
+      hwaccel_args:
+      input_args:
+        - -avoid_negative_ts
+        - make_zero
+        - -fflags
+        - nobuffer+genpts+discardcorrupt
+        - -flags
+        - low_delay
+        - -strict
+        - experimental
+        - -analyzeduration
+        - 1000M
+        - -probesize
+        - 1000M
+        - -rw_timeout
+        - "5000000"
+      inputs:
+        - path: http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password
+          roles:
+            - record
+            - rtmp
+        - path: http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password
+          roles:
+            - detect
+    detect:
+      width: 640
+      height: 480
+      fps: 7
+```
+
+
 
 ### Blue Iris RTSP Cameras
 
 You will need to remove the `nobuffer` flag for Blue Iris RTSP cameras
 
 ```yaml
 ffmpeg:
-  input_args:
-    - -avoid_negative_ts
-    - make_zero
-    - -flags
-    - low_delay
-    - -strict
-    - experimental
-    - -fflags
-    - +genpts+discardcorrupt
-    - -rtsp_transport
-    - tcp
-    - -stimeout
-    - "5000000"
-    - -use_wallclock_as_timestamps
-    - "1"
+  input_args: -avoid_negative_ts make_zero -flags low_delay -strict experimental -fflags +genpts+discardcorrupt -rtsp_transport tcp -stimeout 5000000 -use_wallclock_as_timestamps 1
 ```
 
+### UDP Only Cameras
+
+If your cameras do not support TCP connections for RTSP, you can use UDP.
+
+```yaml
+ffmpeg:
+  input_args: -avoid_negative_ts make_zero -fflags +genpts+discardcorrupt -rtsp_transport udp -stimeout 5000000 -use_wallclock_as_timestamps 1
+```
@@ -31,6 +31,7 @@ detectors:
 ```
 
 ### Native Coral (Dev Board)
+_warning: may have [compatibility issues](https://github.com/blakeblackshear/frigate/issues/1706) after `v0.9.x`_
 
 ```yaml
 detectors:
@@ -48,8 +48,8 @@ mqtt:
   # Optional: user
   user: mqtt_user
   # Optional: password
-  # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {}.
-  # eg. password: '{FRIGATE_MQTT_PASSWORD}'
+  # NOTE: MQTT password can be specified with an environment variable that must begin with 'FRIGATE_'.
+  # e.g. password: '{FRIGATE_MQTT_PASSWORD}'
   password: password
   # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None)
   tls_ca_certs: /path/to/ca.crt

@@ -159,6 +159,9 @@ detect:
   enabled: True
   # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate)
   max_disappeared: 25
+  # Optional: Frequency for running detection on stationary objects (default: 0)
+  # When set to 0, object detection will never be run on stationary objects. If set to 10, it will be run on every 10th frame.
+  stationary_interval: 0
 
 # Optional: Object configuration
 # NOTE: Can be overridden at the camera level

@@ -192,10 +195,14 @@ motion:
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
   threshold: 25
-  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.17% of the motion frame area)
-  # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller
-  # moving objects.
-  contour_area: 100
+  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30)
+  # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
+  # make motion detection more sensitive to smaller moving objects.
+  # As a rule of thumb:
+  # - 15 - high sensitivity
+  # - 30 - medium sensitivity
+  # - 50 - low sensitivity
+  contour_area: 30
   # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below)
   # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion.
   # Too low and a fast moving person won't be detected as motion.

@@ -205,10 +212,10 @@ motion:
   # Low values will cause things like moving shadows to be detected as motion for longer.
   # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
   frame_alpha: 0.2
-  # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 180)
-  # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage.
-  # Lower values result in less CPU, but small changes may not register as motion.
-  frame_height: 180
+  # Optional: Height of the resized motion frame (default: 50)
+  # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense
+  # of higher CPU usage. Lower values result in less CPU, but small changes may not register as motion.
+  frame_height: 50
   # Optional: motion mask
   # NOTE: see docs for more detailed info on creating masks
   mask: 0,900,1080,900,1080,1920,0,1920

@@ -218,15 +225,26 @@ motion:
 record:
   # Optional: Enable recording (default: shown below)
   enabled: False
-  # Optional: Number of days to retain recordings regardless of events (default: shown below)
-  # NOTE: This should be set to 0 and retention should be defined in events section below
-  # if you only want to retain recordings of events.
-  retain_days: 0
+  # Optional: Number of minutes to wait between cleanup runs (default: shown below)
+  # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
+  expire_interval: 60
+  # Optional: Retention settings for recording
+  retain:
+    # Optional: Number of days to retain recordings regardless of events (default: shown below)
+    # NOTE: This should be set to 0 and retention should be defined in events section below
+    # if you only want to retain recordings of events.
+    days: 0
+    # Optional: Mode for retention. Available options are: all, motion, and active_objects
+    #   all - save all recording segments regardless of activity
+    #   motion - save all recording segments with any detected motion
+    #   active_objects - save all recording segments with active/moving objects
+    # NOTE: this mode only applies when the days setting above is greater than 0
+    mode: all
   # Optional: Event recording settings
   events:
     # Optional: Maximum length of time to retain video during long events. (default: shown below)
     # NOTE: If an object is being tracked for longer than this amount of time, the retained recordings
-    #       will be the last x seconds of the event unless retain_days under record is > 0.
+    #       will be the last x seconds of the event unless retain->days under record is > 0.
    max_seconds: 300
     # Optional: Number of seconds before the event to include (default: shown below)
     pre_capture: 5

@@ -241,6 +259,16 @@ record:
     retain:
       # Required: Default retention days (default: shown below)
       default: 10
+      # Optional: Mode for retention. (default: shown below)
+      #   all - save all recording segments for events regardless of activity
+      #   motion - save all recording segments for events with any detected motion
+      #   active_objects - save all recording segments for events with active/moving objects
+      #
+      # NOTE: If the retain mode for the camera is more restrictive than the mode configured
+      #       here, the segments will already be gone by the time this mode is applied.
+      #       For example, if the camera retain mode is "motion", the segments without motion are
+      #       never stored, so setting the mode to "all" here won't bring them back.
+      mode: motion
       # Optional: Per object retention days
       objects:
         person: 15

@@ -319,7 +347,7 @@ cameras:
     # Required: A list of input streams for the camera. See documentation for more information.
     inputs:
       # Required: the path to the stream
-      # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {}
+      # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
       - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
       # Required: list of roles for this stream. valid values are: detect,record,rtmp
       # NOTICE: In addition to assigning the record, and rtmp roles,
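The motion options in the reference above (`frame_height`, `frame_alpha`, `threshold`, `contour_area`) all tune a standard background-subtraction loop built around `cv2.accumulateWeighted`, which the comments reference. A minimal sketch of such a loop, assuming OpenCV; this illustrates how the knobs interact and is not Frigate's actual code:

```python
import cv2

FRAME_HEIGHT = 50   # motion.frame_height: small resized frame acts as a cheap blur
FRAME_ALPHA = 0.2   # motion.frame_alpha: weight of the current frame in the running average
THRESHOLD = 25      # motion.threshold: minimum per-pixel delta that counts as a change
CONTOUR_AREA = 30   # motion.contour_area: minimum changed area that counts as motion

avg_frame = None  # running average of past frames (the learned "background")

def detect_motion(frame_bgr):
    """Return bounding boxes of motion areas for one frame (illustrative sketch)."""
    global avg_frame
    h, w = frame_bgr.shape[:2]
    small = cv2.resize(frame_bgr, (int(w * FRAME_HEIGHT / h), FRAME_HEIGHT))
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)

    if avg_frame is None:
        avg_frame = gray.astype("float")
    # blend the current frame into the running average (higher alpha = faster adaptation)
    cv2.accumulateWeighted(gray, avg_frame, FRAME_ALPHA)

    # pixels differing from the average by more than the threshold are "changed"
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
    _, mask = cv2.threshold(delta, THRESHOLD, 255, cv2.THRESH_BINARY)

    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # drop changed regions smaller than contour_area, per the sensitivity rule of thumb
    return [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) >= CONTOUR_AREA]
```

Because every later step operates on the resized frame, lowering `frame_height` cuts CPU across the whole loop, at the cost of small changes no longer registering as motion.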
@@ -36,4 +36,42 @@ motion:
     - 0,461,3,0,1919,0,1919,843,1699,492,1344
 ```
 
-
+
+
+### Further Clarification
+
+This is a response to a [question posed on reddit](https://www.reddit.com/r/homeautomation/comments/ppxdve/replacing_my_doorbell_with_a_security_camera_a_6/hd876w4?utm_source=share&utm_medium=web2x&context=3):
+
+It is helpful to understand a bit about how Frigate uses motion detection and object detection together.
+
+First, Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection.
+
+Once motion is detected, it tries to group up nearby areas of motion together in hopes of identifying a rectangle in the image that will capture the area worth inspecting. These are the red "motion boxes" you see in the debug viewer.
+
+After the area with motion is identified, Frigate creates a "region" (the green boxes in the debug viewer) to run object detection on. The models are trained on square images, so these regions are always squares. It adds a margin around the motion area in hopes of capturing a cropped view of the object moving that fills most of the image passed to object detection, but doesn't cut anything off. It also takes into consideration the location of the bounding box from the previous frame if it is tracking an object.
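As a rough illustration of the square region sizing described above (the exact margin and clamping rules in this sketch are assumptions for illustration, not Frigate's implementation):

```python
def region_for_motion(box, frame_w, frame_h, margin=0.4):
    """Expand a motion box (x, y, w, h) into an in-bounds square region for the detector."""
    x, y, w, h = box
    # square side: the larger box dimension plus a margin on each side
    side = min(int(max(w, h) * (1 + 2 * margin)), frame_w, frame_h)
    cx, cy = x + w // 2, y + h // 2  # center the square on the motion box
    left = min(max(cx - side // 2, 0), frame_w - side)
    top = min(max(cy - side // 2, 0), frame_h - side)
    return left, top, side

# a small motion box near the frame edge still yields an in-bounds square crop
print(region_for_motion((1820, 40, 60, 120), 1920, 1080))  # (1704, 0, 216)
```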
+After object detection runs, if there are detected objects that seem to be cut off, Frigate reframes the region and runs object detection again on the same frame to get a better look.
+
+All of this happens for each area of motion and tracked object.
+
+> Are you simply saying that INITIAL triggering of any kind of detection will only happen in un-masked areas, but that once this triggering happens, the masks become irrelevant and object detection takes precedence?
+
+Essentially, yes. I wouldn't describe it as object detection taking precedence though. The motion masks just prevent those areas from being counted as motion. Those masks do not modify the regions passed to object detection in any way, so you can absolutely detect objects in areas masked for motion.
+
+> If so, this is completely expected and intuitive behavior for me. Because obviously if a "foot" starts motion detection the camera should be able to check if it's an entire person before it fully crosses into the zone. The docs imply this is the behavior, so I also don't understand why this would be detrimental to object detection on the whole.
+
+When just a foot is triggering motion, Frigate will zoom in and look only at the foot. If that even qualifies as a person, it will determine the object is being cut off and look again and again until it zooms back out enough to find the whole person.
+
+It is also detrimental to how Frigate tracks a moving object. Motion nearby the bounding box from the previous frame is used to intelligently determine where the region should be in the next frame. With too much masking, tracking is hampered, and if an object walks from an unmasked area into a fully masked area, they essentially disappear and will be picked up as a "new" object if they leave the masked area. This is important because Frigate uses the history of scores while tracking an object to determine if it is a false positive or not. It takes a minimum of 3 frames for Frigate to determine if an object is the type it thinks it is, and the median score must be greater than the threshold. If a person meets this threshold while on the sidewalk before they walk onto your stoop, you will get an alert the instant they step a single foot into a zone.
+
+> I thought the main point of this feature was to cut down on CPU use when motion is happening in unnecessary areas.
+
+It is, but the definition of "unnecessary" varies. I want to ignore areas of motion that I know are definitely not being triggered by objects of interest. Timestamps, trees, sky, rooftops. I don't want to ignore motion from objects that I want to track and know where they go.
+
+> For me, giving my masks ANY padding results in a lot of people detection I'm not interested in. I live in the city and catch a lot of the sidewalk on my camera. People walk by my front door all the time and the margin between the sidewalk and actually walking onto my stoop is very thin, so I basically have everything but the exact contours of my stoop masked out. This results in very tidy detections but this info keeps throwing me off. Am I just overthinking it?
+
+This is what `required_zones` are for. You should define a zone (remember this is evaluated based on the bottom center of the bounding box) and make it required to save snapshots and clips (now events in 0.9.0). You can also use this in your conditions for a notification.
+
+> Maybe my specific situation just warrants this. I've just been having a hard time understanding the relevance of this information - it seems to be that it's exactly what would be expected when "masking out" an area of ANY image.
+
+That may be the case for you. Frigate will definitely work harder tracking people on the sidewalk to make sure it doesn't miss anyone who steps foot on your stoop. The trade off with the way you have it now is slower recognition of objects and potential misses. That may be acceptable based on your needs. Also, if your resolution is low enough on the detect stream, your regions may already be so big that they grab the entire object anyway.
@@ -9,6 +9,8 @@ ffmpeg with NVDEC support is required. The special docker architecture 'amd64nvi
 includes this support for amd64 platforms. An aarch64 build for the Jetson, which
 also includes NVDEC, may be added in the future.
 
+Some more detailed setup instructions are also available in [this issue](https://github.com/blakeblackshear/frigate/issues/1847#issuecomment-932076731).
+
 ## Docker setup
 
 ### Requirements
@@ -3,8 +3,22 @@ id: record
 title: Recording
 ---
 
-Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config.
-
-Exported clips are also created off of these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
+Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
+
+H265 recordings can be viewed in Edge and Safari only. All other browsers require recordings to be encoded with H264.
+
+## What if I don't want 24/7 recordings?
+
+If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled.
+
+```yaml
+record:
+  enabled: True
+  events:
+    retain:
+      default: 10
+```
+
+This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
 
 When `retain_days` is set to `0`, segments will be deleted from the cache if no events are in progress.
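The "largest matching retention value" rule above reduces to a max() over the policies that apply to a segment. A simplified sketch, with illustrative field names:

```python
from datetime import datetime, timedelta

def should_keep_segment(segment_end, recording_retain_days, event_retain_days, now=None):
    """Keep a segment until the largest applicable retention window has passed.

    event_retain_days is 0 when no event overlaps the segment.
    """
    now = now or datetime.now()
    keep_days = max(recording_retain_days, event_retain_days)
    return segment_end > now - timedelta(days=keep_days)

# a segment overlapped by an event retained for 10 days outlives a 0-day recording policy
print(should_keep_segment(datetime.now() - timedelta(days=3), 0, 10))  # True
```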
@@ -8,3 +8,31 @@ Zones allow you to define a specific area of the frame and apply additional filt
 During testing, enable the Zones option for the debug feed so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.
 
 To create a zone, follow [the steps for a "Motion mask"](/configuration/masks), but use the section of the web UI for creating a zone instead.
+
+### Restricting zones to specific objects
+
+Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars.
+
+```yaml
+camera:
+  record:
+    events:
+      required_zones:
+        - entire_yard
+        - front_yard_street
+  snapshots:
+    required_zones:
+      - entire_yard
+      - front_yard_street
+  zones:
+    entire_yard:
+      coordinates: ... (everywhere you want a person)
+      objects:
+        - person
+    front_yard_street:
+      coordinates: ... (just the street)
+      objects:
+        - car
+```
+
+Only car objects can trigger the `front_yard_street` zone and only person objects can trigger the `entire_yard` zone. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street.
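Zone membership is evaluated against the bottom center of an object's bounding box (as noted in the masks discussion earlier). A minimal sketch of that test with a ray-casting point-in-polygon check; illustrative, not Frigate's implementation:

```python
def point_in_polygon(x, y, polygon):
    """Ray-casting test; polygon is a list of (x, y) vertices."""
    inside = False
    j = len(polygon) - 1
    for i, (xi, yi) in enumerate(polygon):
        xj, yj = polygon[j]
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
        j = i
    return inside

def object_in_zone(bbox, zone_coordinates):
    """bbox is (x1, y1, x2, y2); the test point is the bottom center of the box."""
    x1, y1, x2, y2 = bbox
    return point_in_polygon((x1 + x2) / 2, y2, zone_coordinates)

entire_yard = [(0, 461), (3, 0), (1919, 0), (1919, 843)]  # example vertex list
print(object_in_zone((800, 300, 900, 500), entire_yard))  # True
```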
@@ -13,13 +13,13 @@ A solid green image means that frigate has not received any frames from ffmpeg.
 
 ### How can I get sound or audio in my recordings?
 
-By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/frigate/configuration/index#ffmpeg).
+By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](configuration/index#full-configuration-reference).
 
 ### My mjpeg stream or snapshots look green and crazy
 
 This almost always means that the width/height defined for your camera are not correct. Double check the resolution with vlc or another player. Also make sure you don't have the width and height values backwards.
 
-
+
 
 ### I can't view events or recordings in the Web UI.
@@ -17,7 +17,7 @@ The ideal resolution for detection is one where the objects you want to detect f
 
 Larger resolutions **do** improve performance if the objects are very small in the frame.
 
-
+
 
 ### Example Camera Configuration
56 docs/docs/guides/ha_notifications.md (new file)

@@ -0,0 +1,56 @@
+---
+id: ha_notifications
+title: Home Assistant notifications
+---
+
+The best way to get started with notifications for Frigate is to use the [Blueprint](https://community.home-assistant.io/t/frigate-mobile-app-notifications/311091). You can use the yaml generated from the Blueprint as a starting point and customize from there.
+
+It is generally recommended to trigger notifications based on the `frigate/events` mqtt topic. This provides the event_id needed to fetch [thumbnails/snapshots/clips](/integrations/home-assistant#notification-api) and other useful information to customize when and where you want to receive alerts. The data is published in the form of a change feed, which means you can reference the "previous state" of the object in the `before` section and the "current state" of the object in the `after` section. You can see an example [here](/integrations/mqtt#frigateevents).
+
+Here is a simple example of a notification automation for events which will update the existing notification for each change. This means the image you see in the notification will update as frigate finds a "better" image.
+
+```yaml
+automation:
+  - alias: Notify of events
+    trigger:
+      platform: mqtt
+      topic: frigate/events
+    action:
+      - service: notify.mobile_app_pixel_3
+        data_template:
+          message: 'A {{trigger.payload_json["after"]["label"]}} was detected.'
+          data:
+            image: 'https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg?format=android'
+            tag: '{{trigger.payload_json["after"]["id"]}}'
+            when: '{{trigger.payload_json["after"]["start_time"]|int}}'
+```
+
+## Conditions
+
+Conditions with the `before` and `after` values allow a high degree of customization for automations.
+
+When a person enters a zone named yard
+
+```yaml
+condition:
+  - "{{ trigger.payload_json['after']['label'] == 'person' }}"
+  - "{{ 'yard' in trigger.payload_json['after']['entered_zones'] }}"
+```
+
+When a person leaves a zone named yard
+
+```yaml
+condition:
+  - "{{ trigger.payload_json['after']['label'] == 'person' }}"
+  - "{{ 'yard' in trigger.payload_json['before']['current_zones'] }}"
+  - "{{ not 'yard' in trigger.payload_json['after']['current_zones'] }}"
+```
+
+Notify for dogs in the front with a high top score
+
+```yaml
+condition:
+  - "{{ trigger.payload_json['after']['label'] == 'dog' }}"
+  - "{{ trigger.payload_json['after']['camera'] == 'front' }}"
+  - "{{ trigger.payload_json['after']['top_score'] > 0.98 }}"
+```
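The same `frigate/events` change feed can also be consumed outside Home Assistant. A small sketch using paho-mqtt (the client library and broker address are assumptions; the topic and before/after fields are as documented above):

```python
import json
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    payload = json.loads(msg.payload)
    before = payload.get("before") or {}
    after = payload.get("after") or {}

    # fire once when a person first shows up in the yard zone:
    # present in after's entered_zones but not yet in before's
    if (after.get("label") == "person"
            and "yard" in after.get("entered_zones", [])
            and "yard" not in before.get("entered_zones", [])):
        print(f"person entered yard (event {after.get('id')})")

client = mqtt.Client()  # paho-mqtt 1.x style setup (assumed)
client.on_message = on_message
client.connect("mqtt.local", 1883)  # assumed broker address
client.subscribe("frigate/events")
client.loop_forever()
```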
37 docs/docs/guides/stationary_objects.md (new file)

@@ -0,0 +1,37 @@
+---
+id: stationary_objects
+title: Avoiding stationary objects
+---
+
+Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated events for the same parked car. This is because object tracking stops when motion ends, and the event ends with it. Motion detection works by determining if a sufficient number of pixels have changed between frames. Shadows or other lighting changes will be detected as motion. This will often cause a new event for a parked car.
+
+You can use zones to restrict events and notifications to objects that have entered specific areas.
+
+:::caution
+
+It is not recommended to use masks to try and eliminate parked cars in your driveway. Masks are designed to prevent motion from triggering object detection and/or to indicate areas that are guaranteed false positives.
+
+Frigate is designed to track objects as they move, and over-masking can prevent it from knowing that an object in the current frame is the same as the previous frame. You want Frigate to detect objects everywhere and configure your events and alerts to be based on the location of the object with zones.
+
+:::
+
+To only be notified of cars that enter your driveway from the street, you could create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic has more than 1 zone.
+
+See [this example](/configuration/zones#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types.
+
+
+
+To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file. Example below.
+
+```yaml
+camera:
+  record:
+    events:
+      required_zones:
+        - zone_2
+  zones:
+    zone_1:
+      coordinates: ... (parking area)
+    zone_2:
+      coordinates: ... (entrance to driveway)
+```
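The "more than 1 zone" rule above is a one-line check against the event payload. A sketch using the zone names from the example config:

```python
def car_entered_driveway(event):
    """True for cars that crossed the driveway entrance, not cars re-detected in place.

    With zone_2 at the entrance and zone_1 over the parking area, a car that drove
    in will have entered more than one zone; a parked car that triggers a new event
    will typically have entered only the zone it already sits in.
    """
    after = event["after"]
    return after["label"] == "car" and len(after.get("entered_zones", [])) > 1

event = {"after": {"label": "car", "entered_zones": ["zone_2", "zone_1"]}}
print(car_entered_driveway(event))  # True
```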
@@ -9,25 +9,31 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility
 
 I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions, especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or fewer configuration options.
 
+Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](configuration/camera_specific#reolink-410520-possibly-others). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data.
+
 Here are some of the cameras I recommend:
 
-- [Loryta(Dahua) T5442TM-AS-LED](https://www.amazon.com/Loryta-IPC-T5442TM-AS-LED-Starlight-Eyeball-Network/dp/B07S5QZJDH/)
-- [Loryta(Dahua) IPC-T5442TM-AS](https://www.amazon.com/Loryta-IPC-T5442TM-AS-Starlight-Eyeball-Network/dp/B07S21FVC7/)
-- [Amcrest IP5M-T1179EW-28MM](https://www.amazon.com/Amcrest-5-Megapixel-NightVision-Weatherproof-IP5M-T1179EW-28MM/dp/B083G9KT4C/)
+- <a href="https://amzn.to/3uFLtxB" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) T5442TM-AS-LED</a> (affiliate link)
+- <a href="https://amzn.to/3isJ3gU" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T5442TM-AS</a> (affiliate link)
+- <a href="https://amzn.to/2ZWNWIA" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-28MM</a> (affiliate link)
+
+I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
 
 ## Server
 
-My current favorite is the Minisforum GK41 because the dual NICs allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet.
+My current favorite is the Odyssey X86 Blue J4125 because of the Coral M.2 compatibility and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
 
-| Name | Inference Speed | Notes |
-| --- | --- | --- |
-| Minisforum GK41 | 9-10ms | Great alternative to a NUC with dual Gigabit NICs. Easily handles several 1080p cameras. |
-| Intel NUC NUC7i3BNK | 8-10ms | Great performance. Can handle many cameras at 5fps depending on typical amounts of motion. |
-| BMAX B2 Plus | 10-12ms | Good balance of performance and cost. Also capable of running many other services at the same time as frigate. |
-| Atomic Pi | 16ms | Good option for a dedicated low power board with a small number of cameras. Can leverage Intel QuickSync for stream decoding. |
-| Raspberry Pi 3B (32bit) | 60ms | Can handle a small number of cameras, but the detection speeds are slow due to USB 2.0. |
-| Raspberry Pi 4 (32bit) | 15-20ms | Can handle a small number of cameras. The 2GB version runs fine. |
-| Raspberry Pi 4 (64bit) | 10-15ms | Can handle a small number of cameras. The 2GB version runs fine. |
+| Name | Inference Speed | Coral Compatibility | Notes |
+| --- | --- | --- | --- |
+| <a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Odyssey X86 Blue J4125</a> (affiliate link) | 9-10ms | M.2 B+M | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3oxEC8m" target="_blank" rel="nofollow noopener sponsored">Minisforum GK41</a> (affiliate link) | 9-10ms | USB | Great alternative to a NUC. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3ixJFlb" target="_blank" rel="nofollow noopener sponsored">Minisforum GK50</a> (affiliate link) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3l7vCEI" target="_blank" rel="nofollow noopener sponsored">Intel NUC</a> (affiliate link) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. |
+| <a href="https://amzn.to/3a6TBh8" target="_blank" rel="nofollow noopener sponsored">BMAX B2 Plus</a> (affiliate link) | 10-12ms | USB | Good balance of performance and cost. Also capable of running many other services at the same time as frigate. |
+| <a href="https://amzn.to/2YjpY9m" target="_blank" rel="nofollow noopener sponsored">Atomic Pi</a> (affiliate link) | 16ms | USB | Good option for a dedicated low power board with a small number of cameras. Can leverage Intel QuickSync for stream decoding. |
+| <a href="https://amzn.to/2WIpwRU" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 3B (32bit)</a> (affiliate link) | 60ms | USB | Can handle a small number of cameras, but the detection speeds are slow due to USB 2.0. |
+| <a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 4 (32bit)</a> (affiliate link) | 15-20ms | USB | Can handle a small number of cameras. The 2GB version runs fine. |
+| <a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 4 (64bit)</a> (affiliate link) | 10-15ms | USB | Can handle a small number of cameras. The 2GB version runs fine. |
 
 ## Google Coral TPU
 
@@ -38,3 +44,25 @@ The USB version is compatible with the widest variety of hardware and does not r
 The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
 
 A single Coral can handle many cameras and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.
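The `1000/10=100` arithmetic above generalizes into a quick capacity check. A sketch using values in the form Frigate reports them (inference speed in ms, detection rate in fps):

```python
def coral_headroom(inference_speed_ms, detection_fps):
    """Compare the theoretical max detections per second against the current rate."""
    max_fps = 1000 / inference_speed_ms  # e.g. 10 ms per inference -> 100 fps ceiling
    return max_fps, detection_fps / max_fps

max_fps, utilization = coral_headroom(inference_speed_ms=10, detection_fps=72)
print(f"ceiling {max_fps:.0f} fps, running at {utilization:.0%}")
# near 100%: tune motion masks first; if already tuned, consider a second Coral
```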
+### What does Frigate use the CPU for and what does it use the Coral for? (ELI5 Version)
+
+This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
+
+CPU Usage: I am a CPU, Mendel is a Google Coral
+
+My buddy Mendel and I have been tasked with keeping the neighbor's red footed booby off my parents' yard. Now I'm really bad at identifying birds. It takes me forever, but my buddy Mendel is incredible at it.
+
+Mendel however, struggles at pretty much anything else. So we make an agreement. I wait till I see something that moves, and snap a picture of it for Mendel. I then show him the picture and he tells me what it is. Most of the time it isn't anything. But eventually I see some movement and Mendel tells me it is the Booby. Score!
+
+_What happens when I increase the resolution of my camera?_
+
+However we realize that there is a problem. There is still booby poop all over the yard. How could we miss that! I've been watching all day! My parents check the window and realize it's dirty and a bit small to see the entire yard, so they clean it and put a bigger one in there. Now there is so much more to see! However I now have a much bigger area to scan for movement and have to work a lot harder! Even my buddy Mendel has to work harder, as now the pictures have a lot more detail in them that he has to look at to see if it is our sneaky booby.
+
+Basically - When you increase the resolution and/or the frame rate of the stream there is now significantly more data for the CPU to parse. That takes additional computing power. The Google Coral is really good at doing object detection, but it doesn't have time to look everywhere all the time (especially when there are many windows to check). To balance it, Frigate uses the CPU to look for movement, then sends those frames to the Coral to do object detection. This allows the Coral to be available to a large number of cameras and not overload it.
+
+### Do hwaccel args help if I am using a Coral?
+
+YES! The Coral does not help with decoding video streams.
+
+Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://blog.video.ibm.com/streaming-video-tips/keyframes-interframe-video-compression/). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
@@ -20,6 +20,6 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
 
 ## Screenshots
 
-
+
 
-
+
@@ -3,25 +3,82 @@ id: installation
|
||||
title: Installation
|
||||
---
|
||||
|
||||
Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/).
|
||||
Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](integrations/home-assistant) is required to integrate Frigate into Home Assistant.
|
||||
|
||||
Frigate requires an MQTT broker. If using the Home Assistant integration, Frigate and Home Assistant must be connected to the same MQTT server to function properly.
|
||||
## Dependencies
|
||||
|
||||
**MQTT broker** - Frigate requires an MQTT broker. If using Home Assistant, Frigate and Home Assistant must be connected to the same MQTT broker.
|
||||
|
||||
## Preparing your hardware
|
||||
|
||||
### Operating System
|
||||
|
||||
Frigate runs best with docker installed on bare metal debian-based distributions. For ideal performance, Frigate needs access to underlying hardware for the Coral and GPU devices. Running Frigate in a VM on top of Proxmox, ESXi, Virtualbox, etc. is not recommended. The virtualization layer often introduces a sizable amount of overhead for communication with Coral devices.
|
||||
Frigate runs best with docker installed on bare metal debian-based distributions. For ideal performance, Frigate needs access to underlying hardware for the Coral and GPU devices. Running Frigate in a VM on top of Proxmox, ESXi, Virtualbox, etc. is not recommended. The virtualization layer often introduces a sizable amount of overhead for communication with Coral devices, but [not in all circumstances](https://github.com/blakeblackshear/frigate/discussions/1837).
|
||||
|
||||
Windows is not officially supported, but some users have had success getting it to run under WSL or Virtualbox. Getting the GPU and/or Coral devices properly passed to Frigate may be difficult or impossible. Search previous discussions or issues for help.
|
||||
|
||||
### Storage
|
||||
|
||||
Frigate uses the following locations for read/write operations in the container. Docker volume mappings can be used to map these to any location on your host machine.
|
||||
|
||||
- `/media/frigate/clips`: Used for snapshot storage. In the future, it will likely be renamed from `clips` to `snapshots`. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
|
||||
- `/media/frigate/recordings`: Internal system storage for recording segments. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
|
||||
- `/media/frigate/frigate.db`: Default location for the sqlite database. You will also see several files alongside this file while frigate is running. If moving the database location (often needed when using a network drive at `/media/frigate`), it is recommended to mount a volume with docker at `/db` and change the storage location of the database to `/db/frigate.db` in the config file.
|
||||
- `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder.
|
||||
- `/dev/shm`: It is not recommended to modify this directory or map it with docker. This is the location for raw decoded frames in shared memory and it's size is impacted by the `shm-size` calculations below.
|
||||
- `/config/config.yml`: Default location of the config file.

#### Common docker compose storage configurations

Writing to a local disk or external USB drive:

```yaml
version: "3.9"
services:
  frigate:
    ...
    volumes:
      - /path/to/your/config.yml:/config/config.yml:ro
      - /path/to/your/storage:/media/frigate
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD card wear
        target: /tmp/cache
        tmpfs:
          size: 1000000000
    ...
```

Writing to a network drive with the database on a local drive:

```yaml
version: "3.9"
services:
  frigate:
    ...
    volumes:
      - /path/to/your/config.yml:/config/config.yml:ro
      - /path/to/network/storage:/media/frigate
      - /path/to/local/disk:/db
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD card wear
        target: /tmp/cache
        tmpfs:
          size: 1000000000
    ...
```

frigate.yml

```yaml
database:
  path: /db/frigate.db
```

### Calculating required shm-size

Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is 64 MB.

The default shm-size of 64 MB is fine for setups with two or fewer 1080p cameras. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high-resolution cameras and need to specify a higher shm size.

You can calculate the necessary shm-size for each camera with the following formula, using the resolution specified for detect:

```
(width * height * 1.5 * 9 + 270480) / 1048576 = <shm size in MB>
```
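
As a quick sanity check, the formula can be applied per camera and summed. A small sketch (the camera resolutions are placeholder values; the constants come straight from the formula above):

```python
# Estimate shm-size from detect resolutions using the formula above.
# Camera resolutions are example values; substitute your own.
cameras = {
    "front_door": (1280, 720),
    "driveway": (1920, 1080),
}

def shm_mb(width: int, height: int) -> float:
    # width * height * 1.5 bytes per pixel (YUV420) * 9 frames + fixed overhead
    return (width * height * 1.5 * 9 + 270480) / 1048576

total = sum(shm_mb(w, h) for w, h in cameras.values())
for name, (w, h) in cameras.items():
    print(f"{name}: {shm_mb(w, h):.1f} MB")
print(f"suggested shm-size: at least {total:.0f} MB")
```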
@@ -33,7 +90,7 @@ The shm size cannot be set per container for Home Assistant Addons. You must set
By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).

Additionally, the USB Coral draws a considerable amount of power. If you are using any other USB devices, such as an SSD, you will experience instability because the Pi cannot provide enough power to all of its USB devices. You will need to purchase an external USB hub with its own power supply. Some have reported success with <a href="https://amzn.to/3a2mH0P" target="_blank" rel="nofollow noopener sponsored">this one</a> (affiliate link).

## Docker

@@ -100,7 +157,7 @@ docker run -d \

:::caution

Due to limitations in Home Assistant Operating System, utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46).

:::

@@ -1,7 +1,6 @@
---
id: home-assistant
title: Home Assistant Integration
---
The best way to integrate with Home Assistant is to use the [official integration](https://github.com/blakeblackshear/frigate-hass-integration).

@@ -77,131 +76,34 @@ Home Assistant > Configuration > Integrations > Frigate > Options

The integration provides:

- Browsing event recordings with thumbnails
- Browsing snapshots
- Browsing recordings by month, day, camera, and time

This is accessible via "Media Browser" on the left menu panel in Home Assistant.

<a name="api"></a>

## Notification API

Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications.

To load a thumbnail for an event:

```
https://HA_URL/api/frigate/notifications/<event-id>/thumbnail.jpg
```

To load a snapshot for an event:

```
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
```

To load a video clip of an event:

```
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
```

### Notifications

Frigate publishes event information in the form of a change feed via MQTT. This allows lots of customization for notifications to meet your needs. Event changes are published with `before` and `after` information, as shown [here](#frigateevents). Since some people may not want to expose Frigate to the web, you can leverage the Home Assistant API that the Frigate integration ties into (which is exposed to the web, and can therefore be used for mobile notifications and similar).

Here is a simple example of a notification automation for events that updates the existing notification with each change. This means the image you see in the notification will update as Frigate finds a "better" image.

```yaml
automation:
  - alias: Notify of events
    trigger:
      platform: mqtt
      topic: frigate/events
    action:
      - service: notify.mobile_app_pixel_3
        data_template:
          message: 'A {{trigger.payload_json["after"]["label"]}} was detected.'
          data:
            image: 'https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg?format=android'
            tag: '{{trigger.payload_json["after"]["id"]}}'
            when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```

```yaml
automation:
  - alias: When a person enters a zone named yard
    trigger:
      platform: mqtt
      topic: frigate/events
    condition:
      - "{{ trigger.payload_json['after']['label'] == 'person' }}"
      - "{{ 'yard' in trigger.payload_json['after']['entered_zones'] }}"
    action:
      - service: notify.mobile_app_pixel_3
        data_template:
          message: "A {{trigger.payload_json['after']['label']}} has entered the yard."
          data:
            image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
            tag: "{{trigger.payload_json['after']['id']}}"
            when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```

```yaml
- alias: When a person leaves a zone named yard
  trigger:
    platform: mqtt
    topic: frigate/events
  condition:
    - "{{ trigger.payload_json['after']['label'] == 'person' }}"
    - "{{ 'yard' in trigger.payload_json['before']['current_zones'] }}"
    - "{{ 'yard' not in trigger.payload_json['after']['current_zones'] }}"
  action:
    - service: notify.mobile_app_pixel_3
      data_template:
        message: "A {{trigger.payload_json['after']['label']}} has left the yard."
        data:
          image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
          tag: "{{trigger.payload_json['after']['id']}}"
          when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```

```yaml
- alias: Notify for dogs in the front with a high top score
  trigger:
    platform: mqtt
    topic: frigate/events
  condition:
    - "{{ trigger.payload_json['after']['label'] == 'dog' }}"
    - "{{ trigger.payload_json['after']['camera'] == 'front' }}"
    - "{{ trigger.payload_json['after']['top_score'] > 0.98 }}"
  action:
    - service: notify.mobile_app_pixel_3
      data_template:
        message: "High confidence dog detection."
        data:
          image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
          tag: "{{trigger.payload_json['after']['id']}}"
          when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```

If you are using Telegram, you can fetch the image directly from Frigate:

```yaml
automation:
  - alias: Notify of events
    trigger:
      platform: mqtt
      topic: frigate/events
    action:
      - service: notify.telegram_full
        data_template:
          message: 'A {{trigger.payload_json["after"]["label"]}} was detected.'
          data:
            photo:
              # this url should work for addon users
              - url: 'http://ccab4aaf-frigate:5000/api/events/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg'
                caption: 'A {{trigger.payload_json["after"]["label"]}} was detected on {{ trigger.payload_json["after"]["camera"] }} camera'
```
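
For testing outside Home Assistant, the same endpoints can be exercised with a short script. Below is a minimal sketch using the `requests` library; the `HA_URL` value and event id are placeholders, and whether authentication is needed depends on how your instance is exposed:

```python
# Minimal sketch: download the notification media for one event.
# HA_URL and EVENT_ID are placeholders; substitute values from your setup.
import requests

HA_URL = "https://your.public.hass.address.com"
EVENT_ID = "1607123955.475377-mxklsc"

for name in ("thumbnail.jpg", "snapshot.jpg", "clip.mp4"):
    url = f"{HA_URL}/api/frigate/notifications/{EVENT_ID}/{name}"
    resp = requests.get(url, timeout=10)
    if resp.ok:
        with open(f"{EVENT_ID}-{name}", "wb") as f:
            f.write(resp.content)
        print(f"saved {name} ({len(resp.content)} bytes)")
    else:
        print(f"{name}: HTTP {resp.status_code}")
```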

<a name="streams"></a>

@@ -282,6 +184,6 @@ which server they are referring to.

## FAQ

#### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?

The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.

@@ -1,11 +0,0 @@
---
id: howtos
title: Community Guides
sidebar_label: Community Guides
---

## Community Guides/How-Tos

- Best Camera AI Person & Object Detection - How to Setup Frigate w/ Home Assistant - digiblurDIY [YouTube](https://youtu.be/V8vGdoYO6-Y) - [Article](https://www.digiblur.com/2021/05/how-to-setup-frigate-home-assistant.html)
- Even More Free Local Object Detection with Home Assistant - Frigate Install - Everything Smart Home [YouTube](https://youtu.be/pqDCEZSVeRk)
- Home Assistant Frigate integration for local image recognition - KPeyanski [YouTube](https://youtu.be/Q2UT78lFQpo) - [Article](https://peyanski.com/home-assistant-frigate-integration/)

@@ -36,7 +36,7 @@ Message published for each changed event. The first message is published when th

```json
{
  "type": "update", // new, update, end
  "before": {
    "id": "1607123955.475377-mxklsc",
    "camera": "front_door",
@@ -53,7 +53,9 @@ Message published for each changed event. The first message is published when th
    "region": [264, 450, 667, 853],
    "current_zones": ["driveway"],
    "entered_zones": ["yard", "driveway"],
    "thumbnail": null,
    "has_snapshot": false,
    "has_clip": false
  },
  "after": {
    "id": "1607123955.475377-mxklsc",
@@ -71,7 +73,9 @@ Message published for each changed event. The first message is published when th
    "region": [218, 440, 693, 915],
    "current_zones": ["yard", "driveway"],
    "entered_zones": ["yard", "driveway"],
    "thumbnail": null,
    "has_snapshot": false,
    "has_clip": false
  }
}
```
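
The same change feed that drives the notification automations above can be consumed by any MQTT client. A minimal sketch with the `paho-mqtt` library that mirrors the "person entered the yard" condition (the broker address and the default `frigate` topic prefix are assumptions):

```python
# Minimal sketch: react to Frigate event changes over MQTT.
# The broker address and the default "frigate" topic prefix are assumptions.
import json

import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    payload = json.loads(msg.payload)
    before, after = payload["before"], payload["after"]
    # fire once, when "yard" first appears in entered_zones
    if (
        after["label"] == "person"
        and "yard" in after["entered_zones"]
        and "yard" not in before["entered_zones"]
    ):
        print(f"person entered yard on {after['camera']} (event {after['id']})")

client = mqtt.Client()
client.on_message = on_message
client.connect("192.168.1.10", 1883, 60)
client.subscribe("frigate/events")
client.loop_forever()
```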

@@ -3,8 +3,8 @@ const path = require('path');
module.exports = {
  title: 'Frigate',
  tagline: 'NVR With Realtime Object Detection for IP Cameras',
  url: 'https://docs.frigate.video',
  baseUrl: '/',
  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',
  favicon: 'img/favicon.ico',
@@ -29,6 +29,16 @@ module.exports = {
        label: 'Docs',
        position: 'left',
      },
      {
        href: 'https://frigate.video',
        label: 'Website',
        position: 'right',
      },
      {
        href: 'https://demo.frigate.video',
        label: 'Demo',
        position: 'right',
      },
      {
        href: 'https://github.com/blakeblackshear/frigate',
        label: 'GitHub',

docs/package-lock.json (generated, 54 lines changed)

@@ -2469,31 +2469,11 @@
    "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM="
  },
  "ansi-align": {
    "version": "3.0.1",
    "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
    "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
    "requires": {
      "string-width": "^4.1.0"
    }
  },
  "ansi-colors": {
@@ -2902,15 +2882,15 @@
    "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24="
  },
  "boxen": {
    "version": "5.1.2",
    "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz",
    "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==",
    "requires": {
      "ansi-align": "^3.0.0",
      "camelcase": "^6.2.0",
      "chalk": "^4.1.0",
      "cli-boxes": "^2.2.1",
      "string-width": "^4.2.2",
      "type-fest": "^0.20.2",
      "widest-line": "^3.1.0",
      "wrap-ansi": "^7.0.0"
@@ -6826,9 +6806,9 @@
    "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E="
  },
  "nth-check": {
    "version": "2.0.1",
    "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz",
    "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==",
    "requires": {
      "boolbase": "^1.0.0"
    }
@@ -7650,9 +7630,9 @@
    "integrity": "sha512-w23ch4f75V1Tnz8DajsYKvY5lF7H1+WvzvLUcF0paFxkTHSp42RS0H5CttdN2Q8RR3DRGZ9v5xD/h3n8C8kGmg=="
  },
  "prismjs": {
    "version": "1.25.0",
    "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.25.0.tgz",
    "integrity": "sha512-WCjJHl1KEWbnkQom1+SzftbtXMKQoezOCYs5rECqMN+jP+apI7ftoflyqigqzopSO3hMhTEb0mFClA8lkolgEg=="
  },
  "process-nextick-args": {
    "version": "2.0.1",
@@ -9397,9 +9377,9 @@
  },
  "dependencies": {
    "ansi-regex": {
      "version": "5.0.1",
      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
      "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
    }
  }
},

@@ -12,8 +12,8 @@
    "clear": "docusaurus clear"
  },
  "dependencies": {
    "@docusaurus/core": "^2.0.0-beta.6",
    "@docusaurus/preset-classic": "^2.0.0-beta.6",
    "@mdx-js/react": "^1.6.21",
    "clsx": "^1.1.1",
    "raw-loader": "^4.0.2",

@@ -9,6 +9,8 @@ module.exports = {
    'guides/camera_setup',
    'guides/getting_started',
    'guides/false_positives',
    'guides/ha_notifications',
    'guides/stationary_objects',
  ],
  Configuration: [
    'configuration/index',

New binary files added under docs/static/img:

- driveway_zones-min.png (195 KiB)
- driveway_zones.png (1.6 MiB)
- example-mask-poly-min.png (113 KiB)
- media_browser-min.png (133 KiB)
- mismatched-resolution-min.jpg (24 KiB)
- notification-min.png (98 KiB)
- reolink-settings.png (13 KiB)
- resolutions-min.jpg (48 KiB)
@@ -12,6 +12,7 @@ import yaml
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from pydantic import ValidationError

from frigate.config import DetectorTypeEnum, FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
@@ -20,14 +21,14 @@ from frigate.events import EventCleanup, EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings
from frigate.mqtt import MqttSocketRelay, create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
from frigate.zeroconf import broadcast_zeroconf

logger = logging.getLogger(__name__)

@@ -66,6 +67,12 @@ class FrigateApp:

    def init_config(self):
        config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")

        # Check if we can use .yaml instead of .yml
        config_file_yaml = config_file.replace(".yml", ".yaml")
        if os.path.isfile(config_file_yaml):
            config_file = config_file_yaml

        user_config = FrigateConfig.parse_file(config_file)
        self.config = user_config.runtime_config

@@ -85,29 +92,6 @@ class FrigateApp:
            "frame_queue": mp.Queue(maxsize=2),
        }

    def set_log_levels(self):
        logging.getLogger().setLevel(self.config.logger.default.value.upper())
        for log, level in self.config.logger.logs.items():
@@ -127,6 +111,9 @@ class FrigateApp:
            maxsize=len(self.config.cameras.keys()) * 2
        )

        # Queue for recordings info
        self.recordings_info_queue = mp.Queue()

    def init_database(self):
        # Migrate DB location
        old_db_path = os.path.join(CLIPS_DIR, "frigate.db")
@@ -225,6 +212,7 @@ class FrigateApp:
            self.event_queue,
            self.event_processed_queue,
            self.video_output_queue,
            self.recordings_info_queue,
            self.stop_event,
        )
        self.detected_frames_processor.start()
@@ -292,7 +280,9 @@
        self.event_cleanup.start()

    def start_recording_maintainer(self):
        self.recording_maintainer = RecordingMaintainer(
            self.config, self.recordings_info_queue, self.stop_event
        )
        self.recording_maintainer.start()

    def start_recording_cleanup(self):
@@ -315,16 +305,28 @@

    def start(self):
        self.init_logger()
        logger.info(f"Starting Frigate ({VERSION})")
        try:
            try:
                self.init_config()
            except Exception as e:
                print("*************************************************************")
                print("***    Config Validation Errors                           ***")
                print("*************************************************************")
                print(e)
                print("*************************************************************")
                print("***    End Config Validation Errors                       ***")
                print("*************************************************************")
                self.log_process.terminate()
                sys.exit(1)
            self.set_environment_vars()
            self.ensure_dirs()
            self.set_log_levels()
            self.init_queues()
            self.init_database()

New binary file: frigate/birdseye.png (3.3 KiB)

@@ -12,9 +12,8 @@ import yaml
from pydantic import BaseModel, Extra, Field, validator
from pydantic.fields import PrivateAttr

from frigate.const import BASE_DIR, CACHE_DIR, YAML_EXT
from frigate.util import create_mask, deep_merge, load_labels

logger = logging.getLogger(__name__)

@@ -65,9 +64,16 @@ class MqttConfig(FrigateBaseModel):
        return v


class RetainModeEnum(str, Enum):
    all = "all"
    motion = "motion"
    active_objects = "active_objects"


class RetainConfig(FrigateBaseModel):
    default: float = Field(default=10, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
    objects: Dict[str, float] = Field(
        default_factory=dict, title="Object retention period."
    )

@@ -88,9 +94,22 @@ class EventsConfig(FrigateBaseModel):
    )


class RecordRetainConfig(FrigateBaseModel):
    days: float = Field(default=0, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")


class RecordConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable record on all cameras.")
    expire_interval: int = Field(
        default=60,
        title="Number of minutes to wait between cleanup runs.",
    )
    # deprecated - to be removed in a future version
    retain_days: Optional[float] = Field(title="Recording retention period in days.")
    retain: RecordRetainConfig = Field(
        default_factory=RecordRetainConfig, title="Record retention settings."
    )
    events: EventsConfig = Field(
        default_factory=EventsConfig, title="Event specific settings."
    )

@@ -103,10 +122,10 @@ class MotionConfig(FrigateBaseModel):
        ge=1,
        le=255,
    )
    contour_area: Optional[int] = Field(default=30, title="Contour Area")
    delta_alpha: float = Field(default=0.2, title="Delta Alpha")
    frame_alpha: float = Field(default=0.2, title="Frame Alpha")
    frame_height: Optional[int] = Field(default=50, title="Frame Height")
    mask: Union[str, List[str]] = Field(
        default="", title="Coordinates polygon for the motion mask."
    )

@@ -119,15 +138,6 @@ class RuntimeMotionConfig(MotionConfig):
    def __init__(self, **config):
        frame_shape = config.get("frame_shape", (1, 1))

        mask = config.get("mask", "")
        config["raw_mask"] = mask

@@ -162,6 +172,11 @@ class DetectConfig(FrigateBaseModel):
    max_disappeared: Optional[int] = Field(
        title="Maximum number of frames the object can disappear before detection ends."
    )
    stationary_interval: Optional[int] = Field(
        default=0,
        title="Frame interval for checking stationary objects.",
        ge=0,
    )


class FilterConfig(FrigateBaseModel):

@@ -461,7 +476,7 @@ class CameraLiveConfig(FrigateBaseModel):


class CameraConfig(FrigateBaseModel):
    name: Optional[str] = Field(title="Camera name.", regex="^[a-zA-Z0-9_]+$")
    ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
    best_image_timeout: int = Field(
        default=60,
@@ -495,6 +510,7 @@ class CameraConfig(FrigateBaseModel):
    timestamp_style: TimestampStyleConfig = Field(
        default_factory=TimestampStyleConfig, title="Timestamp style configuration."
    )
    _ffmpeg_cmds: List[Dict[str, List[str]]] = PrivateAttr()

    def __init__(self, **config):
        # Set zone colors
@@ -505,6 +521,10 @@ class CameraConfig(FrigateBaseModel):
            for idx, (name, z) in enumerate(config["zones"].items())
        }

        # add roles to the input if there is only one
        if len(config["ffmpeg"]["inputs"]) == 1:
            config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]

        super().__init__(**config)

    @property
@@ -517,6 +537,11 @@ class CameraConfig(FrigateBaseModel):

    @property
    def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]:
        return self._ffmpeg_cmds

    def create_ffmpeg_cmds(self):
        if "_ffmpeg_cmds" in self:
            return
        ffmpeg_cmds = []
        for ffmpeg_input in self.ffmpeg.inputs:
            ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
@@ -524,7 +549,7 @@ class CameraConfig(FrigateBaseModel):
                continue

            ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
        self._ffmpeg_cmds = ffmpeg_cmds

    def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
        ffmpeg_output_args = []
@@ -560,6 +585,7 @@ class CameraConfig(FrigateBaseModel):
            if isinstance(self.ffmpeg.output_args.record, list)
            else self.ffmpeg.output_args.record.split(" ")
        )

        ffmpeg_output_args = (
            record_args
            + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
@@ -618,7 +644,7 @@ class ModelConfig(FrigateBaseModel):
        return self._merged_labelmap

    @property
    def colormap(self) -> Dict[int, Tuple[int, int, int]]:
        return self._colormap

    def __init__(self, **config):
@@ -677,6 +703,9 @@ class FrigateConfig(FrigateBaseModel):
    snapshots: SnapshotsConfig = Field(
        default_factory=SnapshotsConfig, title="Global snapshots configuration."
    )
    live: CameraLiveConfig = Field(
        default_factory=CameraLiveConfig, title="Global live configuration."
    )
    rtmp: RtmpConfig = Field(
        default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
    )
@@ -715,6 +744,7 @@ class FrigateConfig(FrigateBaseModel):
            include={
                "record": ...,
                "snapshots": ...,
                "live": ...,
                "rtmp": ...,
                "objects": ...,
                "motion": ...,
@@ -783,6 +813,40 @@ class FrigateConfig(FrigateBaseModel):
                **camera_config.motion.dict(exclude_unset=True),
            )

            # check runtime config
            assigned_roles = list(
                set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
            )
            if camera_config.record.enabled and "record" not in assigned_roles:
                raise ValueError(
                    f"Camera {name} has record enabled, but record is not assigned to an input."
                )

            if camera_config.rtmp.enabled and "rtmp" not in assigned_roles:
                raise ValueError(
                    f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
                )

            # backwards compatibility for retain_days
            if camera_config.record.retain_days is not None:
                logger.warning(
                    "The 'retain_days' config option has been DEPRECATED and will be removed in a future version. Please use the 'days' setting under 'retain'."
                )
                if camera_config.record.retain.days == 0:
                    camera_config.record.retain.days = camera_config.record.retain_days

            # warn if the higher-level record mode is potentially more restrictive than the events mode
            if (
                camera_config.record.retain.days != 0
                and camera_config.record.retain.mode != RetainModeEnum.all
                and camera_config.record.events.retain.mode
                != camera_config.record.retain.mode
            ):
                logger.warning(
                    f"Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
                )
            # generate the ffmpeg commands
            camera_config.create_ffmpeg_cmds()
            config.cameras[name] = camera_config

        return config
@@ -800,7 +864,7 @@ class FrigateConfig(FrigateBaseModel):
        with open(config_file) as f:
            raw_config = f.read()

        if config_file.endswith(YAML_EXT):
            config = yaml.safe_load(raw_config)
        elif config_file.endswith(".json"):
            config = json.loads(raw_config)

@@ -2,3 +2,4 @@ BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
RECORD_DIR = f"{BASE_DIR}/recordings"
CACHE_DIR = "/tmp/cache"
YAML_EXT = (".yaml", ".yml")
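
Defining `YAML_EXT` as a tuple works because `str.endswith` accepts a tuple of suffixes and returns `True` if any of them matches:

```python
YAML_EXT = (".yaml", ".yml")

# str.endswith accepts a tuple of suffixes
print("/config/config.yml".endswith(YAML_EXT))   # True
print("/config/config.yaml".endswith(YAML_EXT))  # True
print("/config/config.json".endswith(YAML_EXT))  # False
```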

@@ -13,31 +13,11 @@ import tflite_runtime.interpreter as tflite
from setproctitle import setproctitle
from tflite_runtime.interpreter import load_delegate

from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels

logger = logging.getLogger(__name__)


class ObjectDetector(ABC):
    @abstractmethod
    def detect(self, tensor_input, threshold=0.4):

@@ -29,39 +29,12 @@ class EventProcessor(threading.Thread):
        self.events_in_process = {}
        self.stop_event = stop_event

    def run(self):
        # set an end_time on events without an end_time on startup
        Event.update(end_time=Event.start_time + 30).where(
            Event.end_time == None
        ).execute()

        while not self.stop_event.is_set():
            try:
                event_type, camera, event_data = self.event_queue.get(timeout=10)
@@ -70,16 +43,35 @@

            logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")

            event_config: EventsConfig = self.config.cameras[camera].record.events

            if event_type == "start":
                self.events_in_process[event_data["id"]] = event_data

            elif event_type == "update":
                self.events_in_process[event_data["id"]] = event_data
                # TODO: this will possibly generate a lot of db activity
                if event_data["has_clip"] or event_data["has_snapshot"]:
                    Event.replace(
                        id=event_data["id"],
                        label=event_data["label"],
                        camera=camera,
                        start_time=event_data["start_time"] - event_config.pre_capture,
                        end_time=None,
                        top_score=event_data["top_score"],
                        false_positive=event_data["false_positive"],
                        zones=list(event_data["entered_zones"]),
                        thumbnail=event_data["thumbnail"],
                        region=event_data["region"],
                        box=event_data["box"],
                        area=event_data["area"],
                        has_clip=event_data["has_clip"],
                        has_snapshot=event_data["has_snapshot"],
                    ).execute()

            elif event_type == "end":
                if event_data["has_clip"] or event_data["has_snapshot"]:
                    Event.replace(
                        id=event_data["id"],
                        label=event_data["label"],
                        camera=camera,
@@ -89,13 +81,20 @@
                        false_positive=event_data["false_positive"],
                        zones=list(event_data["entered_zones"]),
                        thumbnail=event_data["thumbnail"],
                        region=event_data["region"],
                        box=event_data["box"],
                        area=event_data["area"],
                        has_clip=event_data["has_clip"],
                        has_snapshot=event_data["has_snapshot"],
                    ).execute()

                del self.events_in_process[event_data["id"]]
                self.event_processed_queue.put((event_data["id"], camera))

        # set an end_time on events without an end_time before exiting
        Event.update(end_time=datetime.datetime.now().timestamp()).where(
            Event.end_time == None
        ).execute()
        logger.info("Exiting event processor...")
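
Peewee's `replace()` is an insert that resolves primary-key conflicts by replacing the row, which is what lets the `update` and `end` branches above write the same event id repeatedly. A self-contained illustration (the model here is a stand-in, not Frigate's):

```python
# Illustrative only: peewee's Model.replace() performs an upsert keyed on the
# primary key, so repeated calls with the same id overwrite the existing row.
from peewee import CharField, FloatField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class DemoEvent(Model):
    id = CharField(primary_key=True)
    label = CharField()
    top_score = FloatField()

    class Meta:
        database = db

db.create_tables([DemoEvent])
DemoEvent.replace(id="abc", label="person", top_score=0.7).execute()
DemoEvent.replace(id="abc", label="person", top_score=0.9).execute()  # overwrites
print(DemoEvent.get_by_id("abc").top_score)  # 0.9
```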

@@ -223,12 +222,12 @@ class EventCleanup(threading.Thread):
        for event in duplicate_events:
            logger.debug(f"Removing duplicate: {event.id}")
            media_name = f"{event.camera}-{event.id}"
            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
            media_path.unlink(missing_ok=True)
            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
            media_path.unlink(missing_ok=True)
            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
            media_path.unlink(missing_ok=True)

        (
            Event.delete()

@@ -1,6 +1,7 @@
import base64
from collections import OrderedDict
from datetime import datetime, timedelta
import copy
import json
import glob
import logging
@@ -132,6 +133,8 @@ def delete_event(id):
    if event.has_snapshot:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
        media.unlink(missing_ok=True)
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media.unlink(missing_ok=True)
    if event.has_clip:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
        media.unlink(missing_ok=True)
@@ -181,7 +184,7 @@ def event_thumbnail(id):
    thumbnail_bytes = jpg.tobytes()

    response = make_response(thumbnail_bytes)
    response.headers["Content-Type"] = "image/jpeg"
    return response

@@ -190,7 +193,7 @@ def event_snapshot(id):
    download = request.args.get("download", type=bool)
    jpg_bytes = None
    try:
        event = Event.get(Event.id == id, Event.end_time != None)
        if not event.has_snapshot:
            return "Snapshot not available", 404
        # read snapshot from disk
@@ -222,7 +225,7 @@
        return "Event not found", 404

    response = make_response(jpg_bytes)
    response.headers["Content-Type"] = "image/jpeg"
    if download:
        response.headers[
            "Content-Disposition"
@@ -321,7 +324,7 @@ def config():
    # add in the ffmpeg_cmds
    for camera_name, camera in current_app.frigate_config.cameras.items():
        camera_dict = config["cameras"][camera_name]
        camera_dict["ffmpeg_cmds"] = copy.deepcopy(camera.ffmpeg_cmds)
        for cmd in camera_dict["ffmpeg_cmds"]:
            cmd["cmd"] = " ".join(cmd["cmd"])

@@ -358,9 +361,10 @@ def best(camera_name, label):

    crop = bool(request.args.get("crop", 0, type=int))
    if crop:
        box_size = 300
        box = best_object.get("box", (0, 0, box_size, box_size))
        region = calculate_region(
            best_frame.shape, box[0], box[1], box[2], box[3], box_size, multiplier=1.1
        )
        best_frame = best_frame[region[1] : region[3], region[0] : region[2]]

@@ -375,7 +379,7 @@ def best(camera_name, label):
            ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
        )
        response = make_response(jpg.tobytes())
        response.headers["Content-Type"] = "image/jpeg"
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404
@@ -437,7 +441,7 @@ def latest_frame(camera_name):
            ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
        )
        response = make_response(jpg.tobytes())
        response.headers["Content-Type"] = "image/jpeg"
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404
@@ -657,10 +661,15 @@ def vod_ts(camera, start_ts, end_ts):
        # Determine if we need to end the last clip early
        if recording.end_time > end_ts:
            duration -= int((recording.end_time - end_ts) * 1000)

        if duration > 0:
            clips.append(clip)
            durations.append(duration)
        else:
            logger.warning(f"Recording clip is missing or empty: {recording.path}")

    if not clips:
        logger.error("No recordings found for the requested time range")
        return "No recordings found.", 404

    hour_ago = datetime.now() - timedelta(hours=1)
@@ -689,15 +698,20 @@ def vod_event(id):
    try:
        event: Event = Event.get(Event.id == id)
    except DoesNotExist:
        logger.error(f"Event not found: {id}")
        return "Event not found.", 404

    if not event.has_clip:
        logger.error(f"Event does not have recordings: {id}")
        return "Recordings not available", 404

    clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")

    if not os.path.isfile(clip_path):
        end_ts = (
            datetime.now().timestamp() if event.end_time is None else event.end_time
        )
        return vod_ts(event.camera, event.start_time, end_ts)

    duration = int((event.end_time - event.start_time) * 1000)
    return jsonify(

@@ -15,6 +15,9 @@ class Event(Model):
    thumbnail = TextField()
    has_clip = BooleanField(default=True)
    has_snapshot = BooleanField(default=True)
    region = JSONField()
    box = JSONField()
    area = IntegerField()


class Recordings(Model):
@@ -24,3 +27,5 @@ class Recordings(Model):
    start_time = DateTimeField()
    end_time = DateTimeField()
    duration = FloatField()
    motion = IntegerField(null=True)
    objects = IntegerField(null=True)
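
With `motion` and `objects` stored per segment, activity-aware retention can be expressed as simple queries. A hedged sketch of the kind of peewee query this enables (illustrative only; it assumes the model also carries the `camera` column used elsewhere in the codebase):

```python
# Illustrative sketch: select recording segments in a window that saw motion.
# Assumes Recordings also has a `camera` column, as used elsewhere in Frigate.
from frigate.models import Recordings

def segments_with_motion(camera: str, after_ts: float, before_ts: float):
    return (
        Recordings.select()
        .where(
            Recordings.camera == camera,
            Recordings.start_time >= after_ts,
            Recordings.end_time <= before_ts,
            Recordings.motion > 0,
        )
        .order_by(Recordings.start_time)
    )
```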

@@ -23,6 +23,7 @@ class MotionDetector:
            interpolation=cv2.INTER_LINEAR,
        )
        self.mask = np.where(resized_mask == [0])
        self.save_images = False

    def detect(self, frame):
        motion_boxes = []
@@ -36,10 +37,15 @@
            interpolation=cv2.INTER_LINEAR,
        )

        # Improve contrast
        minval = np.percentile(resized_frame, 4)
        maxval = np.percentile(resized_frame, 96)
        # don't adjust if the image is a single color
        if minval < maxval:
            resized_frame = np.clip(resized_frame, minval, maxval)
            resized_frame = (
                ((resized_frame - minval) / (maxval - minval)) * 255
            ).astype(np.uint8)

        # mask frame
        resized_frame[self.mask] = [255]
@@ -49,6 +55,8 @@
        if self.frame_counter < 30:
            self.frame_counter += 1
        else:
            if self.save_images:
                self.frame_counter += 1
            # compare to average
            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
@@ -58,7 +66,6 @@
        cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)

        # compute the threshold image for the current frame
        current_thresh = cv2.threshold(
            frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
        )[1]
@@ -75,8 +82,10 @@

        # dilate the thresholded image to fill in holes, then find contours
        # on the thresholded image
        thresh_dilated = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        cnts = imutils.grab_contours(cnts)

        # loop over the contours
@@ -94,6 +103,35 @@
                )
            )

        if self.save_images:
            thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
            for c in cnts:
                contour_area = cv2.contourArea(c)
                if contour_area > self.config.contour_area:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(
                        thresh_dilated,
                        (x, y),
                        (x + w, y + h),
                        (0, 0, 255),
                        2,
                    )
            image_row_1 = cv2.hconcat(
                [
                    cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
                    cv2.cvtColor(avg_delta_image, cv2.COLOR_GRAY2BGR),
                ]
            )
            image_row_2 = cv2.hconcat(
                [cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), thresh_dilated]
            )
            combined_image = cv2.vconcat([image_row_1, image_row_2])
            cv2.imwrite(f"motion/motion-{self.frame_counter}.jpg", combined_image)

        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            if self.motion_frame_count >= 10:

@@ -107,7 +107,7 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
                + str(rc)
            )

        logger.debug("MQTT connected")
        client.subscribe(f"{mqtt_config.topic_prefix}/#")
        client.publish(mqtt_config.topic_prefix + "/available", "online", retain=True)
|
||||
|
||||
|
||||
@@ -16,14 +16,14 @@ from typing import Callable, Dict
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from frigate.config import CameraConfig, FrigateConfig
|
||||
from frigate.config import CameraConfig, SnapshotsConfig, RecordConfig, FrigateConfig
|
||||
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
|
||||
from frigate.edgetpu import load_labels
|
||||
from frigate.util import (
|
||||
SharedMemoryFrameManager,
|
||||
calculate_region,
|
||||
draw_box_with_label,
|
||||
draw_timestamp,
|
||||
load_labels,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -71,8 +71,10 @@ class TrackedObject:
|
||||
self.camera_config = camera_config
|
||||
self.frame_cache = frame_cache
|
||||
self.current_zones = []
|
||||
self.entered_zones = set()
|
||||
self.entered_zones = []
|
||||
self.false_positive = True
|
||||
self.has_clip = False
|
||||
self.has_snapshot = False
|
||||
self.top_score = self.computed_score = 0.0
|
||||
self.thumbnail_data = None
|
||||
self.last_updated = 0
|
||||
@@ -145,7 +147,8 @@ class TrackedObject:
|
||||
# if the object passed the filters once, dont apply again
|
||||
if name in self.current_zones or not zone_filtered(self, zone.filters):
|
||||
current_zones.append(name)
|
||||
self.entered_zones.add(name)
|
||||
if name not in self.entered_zones:
|
||||
self.entered_zones.append(name)
|
||||
|
||||
# if the zones changed, signal an update
|
||||
if not self.false_positive and set(self.current_zones) != set(current_zones):
|
||||
@@ -174,8 +177,12 @@ class TrackedObject:
|
||||
"box": self.obj_data["box"],
|
||||
"area": self.obj_data["area"],
|
||||
"region": self.obj_data["region"],
|
||||
"motionless_count": self.obj_data["motionless_count"],
|
||||
"position_changes": self.obj_data["position_changes"],
|
||||
"current_zones": self.current_zones.copy(),
|
||||
"entered_zones": list(self.entered_zones).copy(),
|
||||
"entered_zones": self.entered_zones.copy(),
|
||||
"has_clip": self.has_clip,
|
||||
"has_snapshot": self.has_snapshot,
|
||||
}
|
||||
|
||||
if include_thumbnail:
|
||||
@@ -258,8 +265,15 @@ class TrackedObject:
|
||||
|
||||
if crop:
|
||||
box = self.thumbnail_data["box"]
|
||||
box_size = 300
|
||||
region = calculate_region(
|
||||
best_frame.shape, box[0], box[1], box[2], box[3], 1.1
|
||||
best_frame.shape,
|
||||
box[0],
|
||||
box[1],
|
||||
box[2],
|
||||
box[3],
|
||||
box_size,
|
||||
multiplier=1.1,
|
||||
)
|
||||
best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
|
||||
|
||||
@@ -580,6 +594,7 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
event_queue,
|
||||
event_processed_queue,
|
||||
video_output_queue,
|
||||
recordings_info_queue,
|
||||
stop_event,
|
||||
):
|
||||
threading.Thread.__init__(self)
|
||||
@@ -591,6 +606,7 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
self.event_queue = event_queue
|
||||
self.event_processed_queue = event_processed_queue
|
||||
self.video_output_queue = video_output_queue
|
||||
self.recordings_info_queue = recordings_info_queue
|
||||
self.stop_event = stop_event
|
||||
self.camera_states: Dict[str, CameraState] = {}
|
||||
self.frame_manager = SharedMemoryFrameManager()
|
||||
@@ -599,6 +615,8 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
self.event_queue.put(("start", camera, obj.to_dict()))
|
||||
|
||||
def update(camera, obj: TrackedObject, current_frame_time):
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
after = obj.to_dict()
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
@@ -609,11 +627,51 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
f"{self.topic_prefix}/events", json.dumps(message), retain=False
|
||||
)
|
||||
obj.previous = after
|
||||
self.event_queue.put(
|
||||
("update", camera, obj.to_dict(include_thumbnail=True))
|
||||
)
|
||||
|
||||
def end(camera, obj: TrackedObject, current_frame_time):
|
||||
snapshot_config = self.config.cameras[camera].snapshots
|
||||
event_data = obj.to_dict(include_thumbnail=True)
|
||||
event_data["has_snapshot"] = False
|
||||
# populate has_snapshot
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
|
||||
# write the snapshot to disk
|
||||
if obj.has_snapshot:
|
||||
snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
|
||||
jpg_bytes = obj.get_jpg_bytes(
|
||||
timestamp=snapshot_config.timestamp,
|
||||
bounding_box=snapshot_config.bounding_box,
|
||||
crop=snapshot_config.crop,
|
||||
height=snapshot_config.height,
|
||||
quality=snapshot_config.quality,
|
||||
)
|
||||
if jpg_bytes is None:
|
||||
logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
|
||||
else:
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"),
|
||||
"wb",
|
||||
) as j:
|
||||
j.write(jpg_bytes)
|
||||
|
||||
# write clean snapshot if enabled
|
||||
if snapshot_config.clean_copy:
|
||||
png_bytes = obj.get_clean_png()
|
||||
if png_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to save clean snapshot for {obj.obj_data['id']}."
|
||||
)
|
||||
else:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"{camera}-{obj.obj_data['id']}-clean.png",
|
||||
),
|
||||
"wb",
|
||||
) as p:
|
||||
p.write(png_bytes)
|
||||
|
||||
if not obj.false_positive:
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
@@ -623,46 +681,8 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
self.client.publish(
|
||||
f"{self.topic_prefix}/events", json.dumps(message), retain=False
|
||||
)
|
||||
# write snapshot to disk if enabled
|
||||
if snapshot_config.enabled and self.should_save_snapshot(camera, obj):
|
||||
jpg_bytes = obj.get_jpg_bytes(
|
||||
timestamp=snapshot_config.timestamp,
|
||||
bounding_box=snapshot_config.bounding_box,
|
||||
crop=snapshot_config.crop,
|
||||
height=snapshot_config.height,
|
||||
quality=snapshot_config.quality,
|
||||
)
|
||||
if jpg_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to save snapshot for {obj.obj_data['id']}."
|
||||
)
|
||||
else:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"
|
||||
),
|
||||
"wb",
|
||||
) as j:
|
||||
j.write(jpg_bytes)
|
||||
event_data["has_snapshot"] = True
|
||||
|
||||
# write clean snapshot if enabled
|
||||
if snapshot_config.clean_copy:
|
||||
png_bytes = obj.get_clean_png()
|
||||
if png_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to save clean snapshot for {obj.obj_data['id']}."
|
||||
)
|
||||
else:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"{camera}-{obj.obj_data['id']}-clean.png",
|
||||
),
|
||||
"wb",
|
||||
) as p:
|
||||
p.write(png_bytes)
|
||||
self.event_queue.put(("end", camera, event_data))
|
||||
self.event_queue.put(("end", camera, obj.to_dict(include_thumbnail=True)))
|
||||
|
||||
def snapshot(camera, obj: TrackedObject, current_frame_time):
    mqtt_config = self.config.cameras[camera].mqtt
@@ -711,9 +731,21 @@ class TrackedObjectProcessor(threading.Thread):
        self.zone_data = defaultdict(lambda: defaultdict(dict))

    def should_save_snapshot(self, camera, obj: TrackedObject):
        if obj.false_positive:
            return False

        snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots

        if not snapshot_config.enabled:
            return False

        # object never changed position
        if obj.obj_data["position_changes"] == 0:
            return False

        # if there are required zones and there is no overlap
        required_zones = self.config.cameras[camera].snapshots.required_zones
        if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
        required_zones = snapshot_config.required_zones
        if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
            logger.debug(
                f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
            )
@@ -721,10 +753,48 @@ class TrackedObjectProcessor(threading.Thread):

        return True

    def should_retain_recording(self, camera, obj: TrackedObject):
        if obj.false_positive:
            return False

        record_config: RecordConfig = self.config.cameras[camera].record

        # Recording is disabled
        if not record_config.enabled:
            return False

        # object never changed position
        if obj.obj_data["position_changes"] == 0:
            return False

        # If there are required zones and there is no overlap
        required_zones = record_config.events.required_zones
        if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not enter required zones"
            )
            return False

        # If the required objects are not present
        if (
            record_config.events.objects is not None
            and obj.obj_data["label"] not in record_config.events.objects
        ):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not contain required objects"
            )
            return False

        return True

    def should_mqtt_snapshot(self, camera, obj: TrackedObject):
        # object never changed position
        if obj.obj_data["position_changes"] == 0:
            return False

        # if there are required zones and there is no overlap
        required_zones = self.config.cameras[camera].mqtt.required_zones
        if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
        if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
            logger.debug(
                f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
            )
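All three `should_*` checks above gate on the same zone test: the object's `entered_zones` must intersect the configured `required_zones`. A minimal standalone sketch of that gate, using illustrative function and variable names rather than Frigate's own:

```python
def enters_required_zones(entered_zones, required_zones):
    # an empty required_zones list means the check always passes;
    # calling set() on both sides makes the intersection work whether
    # entered_zones arrives as a list or a set, which is why the diff
    # wraps obj.entered_zones in set() before intersecting
    return len(required_zones) == 0 or bool(
        set(entered_zones) & set(required_zones)
    )


assert enters_required_zones(["yard", "porch"], ["porch"])
assert not enters_required_zones(["yard"], ["porch"])
assert enters_required_zones(["yard"], [])  # nothing required, always passes
```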
@@ -767,11 +837,26 @@ class TrackedObjectProcessor(threading.Thread):
                frame_time, current_tracked_objects, motion_boxes, regions
            )

            tracked_objects = [
                o.to_dict() for o in camera_state.tracked_objects.values()
            ]

            self.video_output_queue.put(
                (
                    camera,
                    frame_time,
                    current_tracked_objects,
                    tracked_objects,
                    motion_boxes,
                    regions,
                )
            )

            # send info on this frame to the recordings maintainer
            self.recordings_info_queue.put(
                (
                    camera,
                    frame_time,
                    tracked_objects,
                    motion_boxes,
                    regions,
                )
@@ -815,17 +900,7 @@ class TrackedObjectProcessor(threading.Thread):

        # cleanup event finished queue
        while not self.event_processed_queue.empty():
            event_id, camera, clip_created = self.event_processed_queue.get()
            if clip_created:
                obj = self.camera_states[camera].tracked_objects[event_id]
                message = {
                    "before": obj.previous,
                    "after": obj.to_dict(),
                    "type": "clip_ready",
                }
                self.client.publish(
                    f"{self.topic_prefix}/events", json.dumps(message), retain=False
                )
            event_id, camera = self.event_processed_queue.get()
            self.camera_states[camera].finished(event_id)

        logger.info("Exiting object processor...")

@@ -13,31 +13,100 @@ import numpy as np
from scipy.spatial import distance as dist

from frigate.config import DetectConfig
from frigate.util import draw_box_with_label
from frigate.util import intersection_over_union


class ObjectTracker:
    def __init__(self, config: DetectConfig):
        self.tracked_objects = {}
        self.disappeared = {}
        self.positions = {}
        self.max_disappeared = config.max_disappeared
        self.detect_config = config

    def register(self, index, obj):
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
        id = f"{obj['frame_time']}-{rand_id}"
        obj["id"] = id
        obj["start_time"] = obj["frame_time"]
        obj["motionless_count"] = 0
        obj["position_changes"] = 0
        self.tracked_objects[id] = obj
        self.disappeared[id] = 0
        self.positions[id] = {
            "xmins": [],
            "ymins": [],
            "xmaxs": [],
            "ymaxs": [],
            "xmin": 0,
            "ymin": 0,
            "xmax": self.detect_config.width,
            "ymax": self.detect_config.height,
        }

    def deregister(self, id):
        del self.tracked_objects[id]
        del self.disappeared[id]

    # tracks the current position of the object based on the last 10 bounding boxes
    # returns False if the object has moved outside its previous position
    def update_position(self, id, box):
        position = self.positions[id]
        position_box = (
            position["xmin"],
            position["ymin"],
            position["xmax"],
            position["ymax"],
        )

        xmin, ymin, xmax, ymax = box

        iou = intersection_over_union(position_box, box)

        # if the iou drops below the threshold
        # assume the object has moved to a new position and reset the computed box
        if iou < 0.6:
            self.positions[id] = {
                "xmins": [xmin],
                "ymins": [ymin],
                "xmaxs": [xmax],
                "ymaxs": [ymax],
                "xmin": xmin,
                "ymin": ymin,
                "xmax": xmax,
                "ymax": ymax,
            }
            return False

        # if there are fewer than 10 entries for the position, add the bounding box
        # and recompute the position box
        if len(position["xmins"]) < 10:
            position["xmins"].append(xmin)
            position["ymins"].append(ymin)
            position["xmaxs"].append(xmax)
            position["ymaxs"].append(ymax)
            # by using percentiles here, we hopefully remove outliers
            position["xmin"] = np.percentile(position["xmins"], 15)
            position["ymin"] = np.percentile(position["ymins"], 15)
            position["xmax"] = np.percentile(position["xmaxs"], 85)
            position["ymax"] = np.percentile(position["ymaxs"], 85)

        return True

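The percentile smoothing above is what makes the position box robust: with up to ten samples, the 15th/85th percentiles largely ignore a single stray box. A small sketch with made-up coordinates:

```python
import numpy as np

# ten observed xmin values for a parked car, with one bad detection (30)
xmins = [100, 102, 99, 101, 100, 103, 98, 100, 30, 101]

print(np.percentile(xmins, 15))  # ~98.3, close to the true edge near 100
print(min(xmins))                # 30, what a plain min/max box would use
```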
    def update(self, id, new_obj):
        self.disappeared[id] = 0
        # update the motionless count if the object has not moved to a new position
        if self.update_position(id, new_obj["box"]):
            self.tracked_objects[id]["motionless_count"] += 1
        else:
            self.tracked_objects[id]["motionless_count"] = 0
            self.tracked_objects[id]["position_changes"] += 1
        self.tracked_objects[id].update(new_obj)

    def update_frame_times(self, frame_time):
        for id in self.tracked_objects.keys():
            self.tracked_objects[id]["frame_time"] = frame_time

    def match_and_update(self, frame_time, new_objects):
        # group by name
        new_object_groups = defaultdict(lambda: [])

@@ -104,7 +104,7 @@ class BirdsEyeFrameManager:
        self.blank_frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] = 16

        # find and copy the logo on the blank frame
        logo_files = glob.glob("/opt/frigate/web/apple-touch-icon.*.png")
        logo_files = glob.glob("/opt/frigate/frigate/birdseye.png")
        frigate_logo = None
        if len(logo_files) > 0:
            frigate_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)

@@ -1,20 +1,25 @@
import datetime
import itertools
import logging
import multiprocessing as mp
import os
import queue
import random
import shutil
import string
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path

import psutil
from peewee import JOIN, DoesNotExist

from frigate.config import FrigateConfig
from frigate.config import RetainModeEnum, FrigateConfig
from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util import area

logger = logging.getLogger(__name__)

@@ -38,20 +43,28 @@ def remove_empty_directories(directory):


class RecordingMaintainer(threading.Thread):
    def __init__(self, config: FrigateConfig, stop_event):
    def __init__(
        self, config: FrigateConfig, recordings_info_queue: mp.Queue, stop_event
    ):
        threading.Thread.__init__(self)
        self.name = "recording_maint"
        self.config = config
        self.recordings_info_queue = recordings_info_queue
        self.stop_event = stop_event
        self.first_pass = True
        self.recordings_info = defaultdict(list)
        self.end_time_cache = {}

    def move_files(self):
        recordings = [
            d
            for d in os.listdir(CACHE_DIR)
            if os.path.isfile(os.path.join(CACHE_DIR, d))
            and d.endswith(".mp4")
            and not d.startswith("clip_")
        ]
        cache_files = sorted(
            [
                d
                for d in os.listdir(CACHE_DIR)
                if os.path.isfile(os.path.join(CACHE_DIR, d))
                and d.endswith(".mp4")
                and not d.startswith("clip_")
            ]
        )

        files_in_use = []
        for process in psutil.process_iter():
@@ -66,7 +79,9 @@ class RecordingMaintainer(threading.Thread):
            except:
                continue

        for f in recordings:
        # group recordings by camera
        grouped_recordings = defaultdict(list)
        for f in cache_files:
            # Skip files currently in use
            if f in files_in_use:
                continue
@@ -76,45 +91,187 @@
            camera, date = basename.rsplit("-", maxsplit=1)
            start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")

            # Just delete files if recordings are turned off
            if (
                camera not in self.config.cameras
                or not self.config.cameras[camera].record.enabled
            ):
                Path(cache_path).unlink(missing_ok=True)
                continue

            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{cache_path}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)
            if p.returncode == 0:
                duration = float(p.stdout.decode().strip())
                end_time = start_time + datetime.timedelta(seconds=duration)
            else:
                logger.info(f"bad file: {f}")
                Path(cache_path).unlink(missing_ok=True)
                continue

            directory = os.path.join(
                RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera
            grouped_recordings[camera].append(
                {
                    "cache_path": cache_path,
                    "start_time": start_time,
                }
            )

            if not os.path.exists(directory):
                os.makedirs(directory)
        # delete all cached files past the most recent 5
        keep_count = 5
        for camera in grouped_recordings.keys():
            if len(grouped_recordings[camera]) > keep_count:
                to_remove = grouped_recordings[camera][:-keep_count]
                for f in to_remove:
                    Path(f["cache_path"]).unlink(missing_ok=True)
                    self.end_time_cache.pop(f["cache_path"], None)
                grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]

            file_name = f"{start_time.strftime('%M.%S.mp4')}"
            file_path = os.path.join(directory, file_name)
        for camera, recordings in grouped_recordings.items():

            # clear out all the recording info for old frames
            while (
                len(self.recordings_info[camera]) > 0
                and self.recordings_info[camera][0][0]
                < recordings[0]["start_time"].timestamp()
            ):
                self.recordings_info[camera].pop(0)

            # get all events with the end time after the start of the oldest cache file
            # or with end_time None
            events: Event = (
                Event.select()
                .where(
                    Event.camera == camera,
                    (Event.end_time == None)
                    | (Event.end_time >= recordings[0]["start_time"].timestamp()),
                    Event.has_clip,
                )
                .order_by(Event.start_time)
            )
            for r in recordings:
                cache_path = r["cache_path"]
                start_time = r["start_time"]

                # Just delete files if recordings are turned off
                if (
                    camera not in self.config.cameras
                    or not self.config.cameras[camera].record.enabled
                ):
                    Path(cache_path).unlink(missing_ok=True)
                    self.end_time_cache.pop(cache_path, None)
                    continue

                if cache_path in self.end_time_cache:
                    end_time, duration = self.end_time_cache[cache_path]
                else:
                    ffprobe_cmd = [
                        "ffprobe",
                        "-v",
                        "error",
                        "-show_entries",
                        "format=duration",
                        "-of",
                        "default=noprint_wrappers=1:nokey=1",
                        f"{cache_path}",
                    ]
                    p = sp.run(ffprobe_cmd, capture_output=True)
                    if p.returncode == 0:
                        duration = float(p.stdout.decode().strip())
                        end_time = start_time + datetime.timedelta(seconds=duration)
                        self.end_time_cache[cache_path] = (end_time, duration)
                    else:
                        logger.warning(f"Discarding a corrupt recording segment: {f}")
                        Path(cache_path).unlink(missing_ok=True)
                        continue

                # if cached file's start_time is earlier than the retain days for the camera
                if start_time <= (
                    (
                        datetime.datetime.now()
                        - datetime.timedelta(
                            days=self.config.cameras[camera].record.retain.days
                        )
                    )
                ):
                    # if the cached segment overlaps with the events:
                    overlaps = False
                    for event in events:
                        # if the event starts in the future, stop checking events
                        # and remove this segment
                        if event.start_time > end_time.timestamp():
                            overlaps = False
                            Path(cache_path).unlink(missing_ok=True)
                            self.end_time_cache.pop(cache_path, None)
                            break

                        # if the event is in progress or ends after the recording starts, keep it
                        # and stop looking at events
                        if (
                            event.end_time is None
                            or event.end_time >= start_time.timestamp()
                        ):
                            overlaps = True
                            break

                    if overlaps:
                        record_mode = self.config.cameras[
                            camera
                        ].record.events.retain.mode
                        # move from cache to recordings immediately
                        self.store_segment(
                            camera,
                            start_time,
                            end_time,
                            duration,
                            cache_path,
                            record_mode,
                        )
                    # else retain days includes this segment
                    else:
                        record_mode = self.config.cameras[camera].record.retain.mode
                        self.store_segment(
                            camera, start_time, end_time, duration, cache_path, record_mode
                        )

    def segment_stats(self, camera, start_time, end_time):
        active_count = 0
        motion_count = 0
        for frame in self.recordings_info[camera]:
            # frame is after end time of segment
            if frame[0] > end_time.timestamp():
                break
            # frame is before start time of segment
            if frame[0] < start_time.timestamp():
                continue

            active_count += len(
                [
                    o
                    for o in frame[1]
                    if not o["false_positive"] and o["motionless_count"] > 0
                ]
            )

            motion_count += sum([area(box) for box in frame[2]])

        return (motion_count, active_count)

    def store_segment(
        self,
        camera,
        start_time,
        end_time,
        duration,
        cache_path,
        store_mode: RetainModeEnum,
    ):
        motion_count, active_count = self.segment_stats(camera, start_time, end_time)

        # check if the segment shouldn't be stored
        if (store_mode == RetainModeEnum.motion and motion_count == 0) or (
            store_mode == RetainModeEnum.active_objects and active_count == 0
        ):
            Path(cache_path).unlink(missing_ok=True)
            self.end_time_cache.pop(cache_path, None)
            return
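The early return above is the whole retain-mode policy for a segment: its stats must justify the configured mode or it is dropped. A condensed standalone sketch of the same decision; the enum here is a stand-in mirroring the config's `all` / `motion` / `active_objects` values, not Frigate's own class:

```python
from enum import Enum


class RetainMode(Enum):  # stand-in for frigate's RetainModeEnum
    all = "all"
    motion = "motion"
    active_objects = "active_objects"


def should_store(mode, motion_count, active_count):
    # motion mode requires some motion pixels in the segment;
    # active_objects additionally requires a moving true-positive object
    if mode == RetainMode.motion and motion_count == 0:
        return False
    if mode == RetainMode.active_objects and active_count == 0:
        return False
    return True


assert should_store(RetainMode.all, 0, 0)
assert not should_store(RetainMode.motion, 0, 3)
assert should_store(RetainMode.active_objects, 500, 1)
```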
        directory = os.path.join(RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera)

        if not os.path.exists(directory):
            os.makedirs(directory)

        file_name = f"{start_time.strftime('%M.%S.mp4')}"
        file_path = os.path.join(directory, file_name)

        try:
            start_frame = datetime.datetime.now().timestamp()
            # copy then delete is required when recordings are stored on some network drives
            shutil.copyfile(cache_path, file_path)
            logger.debug(
                f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
            )
            os.remove(cache_path)

            rand_id = "".join(
@@ -127,12 +284,61 @@ class RecordingMaintainer(threading.Thread):
                start_time=start_time.timestamp(),
                end_time=end_time.timestamp(),
                duration=duration,
                motion=motion_count,
                objects=active_count,
            )
        except Exception as e:
            logger.error(f"Unable to store recording segment {cache_path}")
            Path(cache_path).unlink(missing_ok=True)
            logger.error(e)

        # clear end_time cache
        self.end_time_cache.pop(cache_path, None)

    def run(self):
        # Check for new files every 5 seconds
        while not self.stop_event.wait(5):
            self.move_files()
        wait_time = 5
        while not self.stop_event.wait(wait_time):
            run_start = datetime.datetime.now().timestamp()

            # empty the recordings info queue
            while True:
                try:
                    (
                        camera,
                        frame_time,
                        current_tracked_objects,
                        motion_boxes,
                        regions,
                    ) = self.recordings_info_queue.get(False)

                    if self.config.cameras[camera].record.enabled:
                        self.recordings_info[camera].append(
                            (
                                frame_time,
                                current_tracked_objects,
                                motion_boxes,
                                regions,
                            )
                        )
                except queue.Empty:
                    break

            try:
                self.move_files()
            except Exception as e:
                logger.error(
                    "Error occurred when attempting to maintain recording cache"
                )
                logger.error(e)
            duration = datetime.datetime.now().timestamp() - run_start
            wait_time = max(0, 5 - duration)
            if wait_time == 0 and not self.first_pass:
                logger.warning(
                    "Cache is taking longer than 5 seconds to clear. Your recordings disk may be too slow."
                )
            if self.first_pass:
                self.first_pass = False

        logger.info("Exiting recording maintenance...")

@@ -157,7 +363,7 @@ class RecordingCleanup(threading.Thread):

        logger.debug("Start deleted cameras.")
        # Handle deleted cameras
        expire_days = self.config.record.retain_days
        expire_days = self.config.record.retain.days
        expire_before = (
            datetime.datetime.now() - datetime.timedelta(days=expire_days)
        ).timestamp()
@@ -166,9 +372,13 @@ class RecordingCleanup(threading.Thread):
            Recordings.end_time < expire_before,
        )

        deleted_recordings = set()
        for recording in no_camera_recordings:
            Path(recording.path).unlink(missing_ok=True)
            Recordings.delete_by_id(recording.id)
            deleted_recordings.add(recording.id)

        logger.debug(f"Expiring {len(deleted_recordings)} recordings")
        Recordings.delete().where(Recordings.id << deleted_recordings).execute()
        logger.debug("End deleted cameras.")

        logger.debug("Start all cameras.")
@@ -179,7 +389,7 @@ class RecordingCleanup(threading.Thread):
                datetime.datetime.now()
                - datetime.timedelta(seconds=config.record.events.max_seconds)
            ).timestamp()
            expire_days = config.record.retain_days
            expire_days = config.record.retain.days
            expire_before = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
@@ -192,47 +402,70 @@ class RecordingCleanup(threading.Thread):
                    Recordings.camera == camera,
                    Recordings.end_time < expire_date,
                )
                .order_by(Recordings.start_time.desc())
                .order_by(Recordings.start_time)
            )

            # Get all the events to check against
            events: Event = (
                Event.select()
                .where(
                    Event.camera == camera, Event.end_time < expire_date, Event.has_clip
                    Event.camera == camera,
                    # need to ensure segments for all events starting
                    # before the expire date are included
                    Event.start_time < expire_date,
                    Event.has_clip,
                )
                .order_by(Event.start_time.desc())
                .order_by(Event.start_time)
                .objects()
            )

            # loop over recordings and see if they overlap with any non-expired events
            # TODO: expire segments based on segment stats according to config
            event_start = 0
            deleted_recordings = set()
            for recording in recordings.objects().iterator():
                keep = False
                # since the events and recordings are sorted, we can skip events
                # that start after the previous recording segment ended
                # Now look for a reason to keep this recording segment
                for idx in range(event_start, len(events)):
                    event = events[idx]

                    # if the next event ends before this segment starts, break
                    if event.end_time < recording.start_time:
                    # if the event starts in the future, stop checking events
                    # and let this recording segment expire
                    if event.start_time > recording.end_time:
                        keep = False
                        break

                    # if the next event starts after the current segment ends, skip it
                    if event.start_time > recording.end_time:
                    # if the event is in progress or ends after the recording starts, keep it
                    # and stop looking at events
                    if event.end_time is None or event.end_time >= recording.start_time:
                        keep = True
                        break

                    # if the event ends before this recording segment starts, skip
                    # this event and check the next event for an overlap.
                    # since the events and recordings are sorted, we can skip events
                    # that end before the previous recording segment started on future segments
                    if event.end_time < recording.start_time:
                        event_start = idx
                        continue

                    keep = True

                # Delete recordings outside of the retention window
                if not keep:
                # Delete recordings outside of the retention window or based on the retention mode
                if (
                    not keep
                    or (
                        config.record.events.retain.mode == RetainModeEnum.motion
                        and recording.motion == 0
                    )
                    or (
                        config.record.events.retain.mode
                        == RetainModeEnum.active_objects
                        and recording.objects == 0
                    )
                ):
                    Path(recording.path).unlink(missing_ok=True)
                    deleted_recordings.add(recording.id)

            logger.debug(f"Expiring {len(deleted_recordings)} recordings")
            (Recordings.delete().where(Recordings.id << deleted_recordings).execute())
            Recordings.delete().where(Recordings.id << deleted_recordings).execute()

            logger.debug(f"End camera: {camera}.")

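Because both queries are now ordered ascending by start time, the loop above amounts to a single forward sweep: `event_start` only ever moves ahead, so each event is examined a bounded number of times across all segments. A toy version of the sweep over plain `(start, end)` tuples; this is a hypothetical helper for illustration and ignores in-progress events with no end time:

```python
def sweep_keep(recordings, events):
    """recordings, events: lists of (start, end) sorted by start.
    Returns indices of recordings that overlap at least one event."""
    kept = []
    event_start = 0
    for r_idx, (r_start, r_end) in enumerate(recordings):
        for idx in range(event_start, len(events)):
            e_start, e_end = events[idx]
            if e_start > r_end:   # event begins after the segment: expire it
                break
            if e_end >= r_start:  # overlap: keep and stop scanning events
                kept.append(r_idx)
                break
            event_start = idx     # event ended before the segment: skip it from now on
    return kept


# segments at 0-10 and 20-30; one event from 25 to 40
print(sweep_keep([(0, 10), (20, 30)], [(25, 40)]))  # [1]
```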
@@ -244,29 +477,32 @@ class RecordingCleanup(threading.Thread):

        default_expire = (
            datetime.datetime.now().timestamp()
            - SECONDS_IN_DAY * self.config.record.retain_days
            - SECONDS_IN_DAY * self.config.record.retain.days
        )
        delete_before = {}

        for name, camera in self.config.cameras.items():
            delete_before[name] = (
                datetime.datetime.now().timestamp()
                - SECONDS_IN_DAY * camera.record.retain_days
                - SECONDS_IN_DAY * camera.record.retain.days
            )

        # find all the recordings older than the oldest recording in the db
        try:
            oldest_recording = (
                Recordings.select().order_by(Recordings.start_time.desc()).get()
            )
            oldest_recording = Recordings.select().order_by(Recordings.start_time).get()

            oldest_timestamp = oldest_recording.start_time
            p = Path(oldest_recording.path)
            oldest_timestamp = p.stat().st_mtime - 1
        except DoesNotExist:
            oldest_timestamp = datetime.datetime.now().timestamp()
        except FileNotFoundError:
            logger.warning(f"Unable to find file from recordings database: {p}")
            Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
            return

        logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
        process = sp.run(
            ["find", RECORD_DIR, "-type", "f", "-newermt", f"@{oldest_timestamp}"],
            ["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
            capture_output=True,
            text=True,
        )
@@ -274,21 +510,52 @@ class RecordingCleanup(threading.Thread):

        for f in files_to_check:
            p = Path(f)
            if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
                p.unlink(missing_ok=True)
            try:
                if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
                    p.unlink(missing_ok=True)
            except FileNotFoundError:
                logger.warning(f"Attempted to expire missing file: {f}")

        logger.debug("End expire files (legacy).")

    def sync_recordings(self):
        logger.debug("Start sync recordings.")

        # get all recordings in the db
        recordings: Recordings = Recordings.select()

        # get all recordings files on disk
        process = sp.run(
            ["find", RECORD_DIR, "-type", "f"],
            capture_output=True,
            text=True,
        )
        files_on_disk = process.stdout.splitlines()

        recordings_to_delete = []
        for recording in recordings.objects().iterator():
            if recording.path not in files_on_disk:
                recordings_to_delete.append(recording.id)

        logger.debug(
            f"Deleting {len(recordings_to_delete)} recordings with missing files"
        )
        Recordings.delete().where(Recordings.id << recordings_to_delete).execute()

        logger.debug("End sync recordings.")

    def run(self):
        # Expire recordings every minute, clean directories every hour.
        for counter in itertools.cycle(range(60)):
        # on startup sync recordings with disk (disabled due to too much CPU usage)
        # self.sync_recordings()

        # Expire tmp clips every minute, recordings and clean directories every hour.
        for counter in itertools.cycle(range(self.config.record.expire_interval)):
            if self.stop_event.wait(60):
                logger.info("Exiting recording cleanup...")
                break

            self.expire_recordings()
            self.clean_tmp_clips()

            if counter == 0:
                self.expire_recordings()
                self.expire_files()
                remove_empty_directories(RECORD_DIR)

@@ -4,6 +4,7 @@ import threading
import time
import psutil
import shutil
import os

from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
@@ -31,6 +32,28 @@ def get_fs_type(path):
    return fsType


def read_temperature(path):
    if os.path.isfile(path):
        with open(path) as f:
            line = f.readline().strip()
            return int(line) / 1000
    return None


def get_temperatures():
    temps = {}

    # Get temperatures for all attached Corals
    base = "/sys/class/apex/"
    if os.path.isdir(base):
        for apex in os.listdir(base):
            temp = read_temperature(os.path.join(base, apex, "temp"))
            if temp is not None:
                temps[apex] = temp

    return temps


def stats_snapshot(stats_tracking):
    camera_metrics = stats_tracking["camera_metrics"]
    stats = {}
@@ -61,6 +84,7 @@ def stats_snapshot(stats_tracking):
        "uptime": (int(time.time()) - stats_tracking["started"]),
        "version": VERSION,
        "storage": {},
        "temperatures": get_temperatures(),
    }

    for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
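The new `temperatures` stat comes from `/sys/class/apex/`, where the Coral driver exposes one directory per device containing a `temp` file in millidegrees Celsius. A quick standalone check of the same reading; it prints nothing on hosts without the apex driver loaded:

```python
import os

base = "/sys/class/apex/"  # present only when the Coral (apex) driver is loaded
devices = sorted(os.listdir(base)) if os.path.isdir(base) else []
for device in devices:
    temp_path = os.path.join(base, device, "temp")
    if os.path.isfile(temp_path):
        with open(temp_path) as f:
            # the kernel reports millidegrees Celsius, hence the / 1000
            print(device, int(f.readline().strip()) / 1000)
```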
@@ -572,7 +572,7 @@ class TestConfig(unittest.TestCase):
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].motion.frame_height >= 120
        assert runtime_config.cameras["back"].motion.frame_height == 50

    def test_motion_contour_area_dynamic(self):

@@ -601,7 +601,7 @@ class TestConfig(unittest.TestCase):
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert round(runtime_config.cameras["back"].motion.contour_area) == 99
        assert round(runtime_config.cameras["back"].motion.contour_area) == 30

    def test_merge_labelmap(self):

@@ -702,7 +702,11 @@ class TestConfig(unittest.TestCase):
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect", "clips"],
                                "roles": ["detect"],
                            },
                            {
                                "path": "rtsp://10.0.0.1:554/video2",
                                "roles": ["clips"],
                            },
                        ]
                    },
@@ -717,6 +721,87 @@ class TestConfig(unittest.TestCase):

        self.assertRaises(ValidationError, lambda: FrigateConfig(**config))

    def test_fails_on_missing_role(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                            {
                                "path": "rtsp://10.0.0.1:554/video2",
                                "roles": ["record"],
                            },
                        ]
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                    "rtmp": {"enabled": True},
                }
            },
        }

        frigate_config = FrigateConfig(**config)
        self.assertRaises(ValueError, lambda: frigate_config.runtime_config)

    def test_works_on_missing_role_multiple_cams(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "rtmp": {"enabled": False},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                            {
                                "path": "rtsp://10.0.0.1:554/video2",
                                "roles": ["record"],
                            },
                        ]
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                },
                "cam2": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                            {
                                "path": "rtsp://10.0.0.1:554/video2",
                                "roles": ["record"],
                            },
                        ]
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                },
            },
        }

        frigate_config = FrigateConfig(**config)
        runtime_config = frigate_config.runtime_config

    def test_global_detect(self):

        config = {
@@ -958,6 +1043,109 @@ class TestConfig(unittest.TestCase):
        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].rtmp.enabled

    def test_global_rtmp_default(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "rtmp": {"enabled": False},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                            {
                                "path": "rtsp://10.0.0.1:554/video2",
                                "roles": ["record"],
                            },
                        ]
                    },
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert not runtime_config.cameras["back"].rtmp.enabled

    def test_global_live(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "live": {"quality": 4},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                        ]
                    },
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].live.quality == 4

    def test_default_live(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                        ]
                    }
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].live.quality == 8

    def test_global_live_merge(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "live": {"quality": 4, "height": 480},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                        ]
                    },
                    "live": {
                        "quality": 7,
                    },
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].live.quality == 7
        assert runtime_config.cameras["back"].live.height == 480

    def test_global_timestamp_style(self):

        config = {
@@ -1032,6 +1220,54 @@ class TestConfig(unittest.TestCase):
        assert runtime_config.cameras["back"].timestamp_style.position == "bl"
        assert runtime_config.cameras["back"].timestamp_style.thickness == 4

    def test_allow_retain_to_be_a_decimal(self):

        config = {
            "mqtt": {"host": "mqtt"},
            "snapshots": {"retain": {"default": 1.5}},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                        ]
                    },
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.dict(exclude_unset=True)

        runtime_config = frigate_config.runtime_config
        assert runtime_config.cameras["back"].snapshots.retain.default == 1.5

    def test_fails_on_bad_camera_name(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "snapshots": {"retain": {"default": 1.5}},
            "cameras": {
                "back camer#": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                            },
                        ]
                    },
                }
            },
        }

        frigate_config = FrigateConfig(**config)

        self.assertRaises(
            ValidationError, lambda: frigate_config.runtime_config.cameras
        )


if __name__ == "__main__":
    unittest.main(verbosity=2)

frigate/test/test_reduce_boxes.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import numpy as np
from unittest import TestCase, main
from frigate.video import box_overlaps, reduce_boxes


class TestBoxOverlaps(TestCase):
    def test_overlap(self):
        assert box_overlaps((100, 100, 200, 200), (50, 50, 150, 150))

    def test_overlap_2(self):
        assert box_overlaps((50, 50, 150, 150), (100, 100, 200, 200))

    def test_no_overlap(self):
        assert not box_overlaps((100, 100, 200, 200), (250, 250, 350, 350))


class TestReduceBoxes(TestCase):
    def test_cluster(self):
        clusters = reduce_boxes(
            [(144, 290, 221, 459), (225, 178, 426, 341), (343, 105, 584, 250)]
        )
        assert len(clusters) == 2


if __name__ == "__main__":
    main(verbosity=2)
@@ -18,6 +18,7 @@ import cv2
import matplotlib.pyplot as plt
import numpy as np
import os
import psutil

logger = logging.getLogger(__name__)

@@ -188,12 +189,12 @@ def draw_box_with_label(
    )


def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
def calculate_region(frame_shape, xmin, ymin, xmax, ymax, model_size, multiplier=2):
    # size is the longest edge and divisible by 4
    size = int(max(xmax - xmin, ymax - ymin) // 4 * 4 * multiplier)
    # don't go any smaller than 300
    if size < 300:
        size = 300
    size = int((max(xmax - xmin, ymax - ymin) * multiplier) // 4 * 4)
    # don't go any smaller than the model_size
    if size < model_size:
        size = model_size

    # x_offset is midpoint of bounding box minus half the size
    x_offset = int((xmax - xmin) / 2.0 + xmin - size / 2.0)
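The reordering in `calculate_region` is subtle but deliberate: flooring to a multiple of 4 after applying the multiplier keeps the region size aligned, where the old formula could break alignment. Illustrative numbers:

```python
xmin, ymin, xmax, ymax = 100, 100, 150, 170
multiplier = 1.2

old_size = int(max(xmax - xmin, ymax - ymin) // 4 * 4 * multiplier)
new_size = int((max(xmax - xmin, ymax - ymin) * multiplier) // 4 * 4)

print(old_size)  # 81: floored before scaling, so no longer a multiple of 4
print(new_size)  # 84: scaled first, then floored, so still a multiple of 4
```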
@@ -534,7 +535,13 @@ def clipped(obj, frame_shape):


def restart_frigate():
    os.kill(os.getpid(), signal.SIGTERM)
    proc = psutil.Process(1)
    # if this is running via s6, sigterm pid 1
    if proc.name() == "s6-svscan":
        proc.terminate()
    # otherwise, just try and exit frigate
    else:
        os.kill(os.getpid(), signal.SIGTERM)


class EventsPerSecond:
@@ -594,6 +601,24 @@ def add_mask(mask, mask_img):
    )
    cv2.fillPoly(mask_img, pts=[contour], color=(0))

def load_labels(path, encoding="utf-8"):
    """Loads labels from file (with or without index numbers).
    Args:
        path: path to label file.
        encoding: label file encoding.
    Returns:
        Dictionary mapping indices to labels.
    """
    with open(path, "r", encoding=encoding) as f:
        lines = f.readlines()
        if not lines:
            return {}

        if lines[0].split(" ", maxsplit=1)[0].isdigit():
            pairs = [line.split(" ", maxsplit=1) for line in lines]
            return {int(index): label.strip() for index, label in pairs}
        else:
            return {index: line.strip() for index, line in enumerate(lines)}

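`load_labels` handles both common labelmap layouts. A small round-trip shows the two branches; it assumes a Frigate checkout where `frigate.util` is importable:

```python
import os
import tempfile

from frigate.util import load_labels


def write_tmp(text):
    fd, path = tempfile.mkstemp(suffix=".txt")
    with os.fdopen(fd, "w") as f:
        f.write(text)
    return path


indexed = write_tmp("0 person\n1 car\n")  # indices parsed from the file
plain = write_tmp("person\ncar\n")        # indices are the line numbers

print(load_labels(indexed))  # {0: 'person', 1: 'car'}
print(load_labels(plain))    # {0: 'person', 1: 'car'}
```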
class FrameManager(ABC):
    @abstractmethod

frigate/video.py (186 lines changed)
@@ -3,18 +3,19 @@ import itertools
import logging
import multiprocessing as mp
import queue
import subprocess as sp
import random
import signal
import subprocess as sp
import threading
import time
from collections import defaultdict
from setproctitle import setproctitle
from typing import Dict, List

from cv2 import cv2
import numpy as np
from cv2 import cv2, reduce
from setproctitle import setproctitle

from frigate.config import CameraConfig
from frigate.config import CameraConfig, DetectConfig
from frigate.edgetpu import RemoteObjectDetector
from frigate.log import LogPipe
from frigate.motion import MotionDetector
@@ -23,8 +24,11 @@ from frigate.util import (
    EventsPerSecond,
    FrameManager,
    SharedMemoryFrameManager,
    area,
    calculate_region,
    clipped,
    intersection,
    intersection_over_union,
    listen,
    yuv_region_2_rgb,
)
@@ -364,6 +368,7 @@ def track_camera(
        frame_queue,
        frame_shape,
        model_shape,
        config.detect,
        frame_manager,
        motion_detector,
        object_detector,
@@ -379,26 +384,36 @@ def track_camera(
    logger.info(f"{name}: exiting subprocess")


def reduce_boxes(boxes):
    if len(boxes) == 0:
        return []
    reduced_boxes = cv2.groupRectangles(
        [list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2
    )[0]
    return [tuple(b) for b in reduced_boxes]
def box_overlaps(b1, b2):
    if b1[2] < b2[0] or b1[0] > b2[2] or b1[1] > b2[3] or b1[3] < b2[1]:
        return False
    return True


def reduce_boxes(boxes, iou_threshold=0.0):
    clusters = []

    for box in boxes:
        matched = 0
        for cluster in clusters:
            if intersection_over_union(box, cluster) > iou_threshold:
                matched = 1
                cluster[0] = min(cluster[0], box[0])
                cluster[1] = min(cluster[1], box[1])
                cluster[2] = max(cluster[2], box[2])
                cluster[3] = max(cluster[3], box[3])

        if not matched:
            clusters.append(list(box))

    return [tuple(c) for c in clusters]
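The new `reduce_boxes` replaces `cv2.groupRectangles` with a greedy single pass: each box either grows a cluster it overlaps (IoU above the threshold) or starts a new one. With the boxes from the new unit test, the middle and right boxes merge and the left box stays alone:

```python
# using reduce_boxes as defined above
boxes = [(144, 290, 221, 459), (225, 178, 426, 341), (343, 105, 584, 250)]
# the first box touches neither of the others, the last two overlap,
# so the greedy pass returns two clusters, matching test_cluster
print(reduce_boxes(boxes))
# [(144, 290, 221, 459), (225, 105, 584, 341)]
```

Note the pass is order dependent, and a box that overlaps two existing clusters grows both without re-merging them afterwards; that is acceptable for the low-threshold use here, where the goal is cheap consolidation rather than exact clustering.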
# modified from https://stackoverflow.com/a/40795835
def intersects_any(box_a, boxes):
    for box in boxes:
        if (
            box_a[2] < box[0]
            or box_a[0] > box[2]
            or box_a[1] > box[3]
            or box_a[3] < box[1]
        ):
            continue
        return True
        if box_overlaps(box_a, box):
            return True
    return False


def detect(
@@ -434,6 +449,7 @@ def process_frames(
    frame_queue: mp.Queue,
    frame_shape,
    model_shape,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
@@ -454,6 +470,8 @@ def process_frames(
    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan_counter = 0

    while not stop_event.is_set():
        if exit_on_empty and frame_queue.empty():
            logger.info("Exiting track_objects...")
@@ -487,33 +505,86 @@ def process_frames(
        # look for motion
        motion_boxes = motion_detector.detect(frame)

        # only get the tracked object boxes that intersect with motion
        # get stationary object ids
        # check every Nth frame for stationary objects
        # disappeared objects are not stationary
        # also check for overlapping motion boxes
        stationary_object_ids = [
            obj["id"]
            for obj in object_tracker.tracked_objects.values()
            # if there hasn't been motion for 10 frames
            if obj["motionless_count"] >= 10
            # and it isn't due for a periodic check
            and (
                detect_config.stationary_interval == 0
                or obj["motionless_count"] % detect_config.stationary_interval != 0
            )
            # and it hasn't disappeared
            and object_tracker.disappeared[obj["id"]] == 0
            # and it doesn't overlap with any current motion boxes
            and not intersects_any(obj["box"], motion_boxes)
        ]

        # get tracked object boxes that aren't stationary
        tracked_object_boxes = [
            obj["box"]
            for obj in object_tracker.tracked_objects.values()
            if intersects_any(obj["box"], motion_boxes)
            if obj["id"] not in stationary_object_ids
        ]

        # combine motion boxes with known locations of existing objects
        combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

        region_min_size = max(model_shape[0], model_shape[1])
        # compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            calculate_region(
                frame_shape,
                a[0],
                a[1],
                a[2],
                a[3],
                region_min_size,
                multiplier=random.uniform(1.2, 1.5),
            )
            for a in combined_boxes
        ]

        # combine overlapping regions
        combined_regions = reduce_boxes(regions)

        # re-compute regions
        # consolidate regions with heavy overlap
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
            for a in combined_regions
            calculate_region(
                frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
            )
            for a in reduce_boxes(regions, 0.4)
        ]

        # if starting up, get the next startup scan region
        if startup_scan_counter < 9:
            ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
            ymax = int(frame_shape[0] / 3 + ymin)
            xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
            xmax = int(frame_shape[1] / 3 + xmin)
            regions.append(
                calculate_region(
                    frame_shape, xmin, ymin, xmax, ymax, region_min_size, multiplier=1.2
                )
            )
            startup_scan_counter += 1
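The startup scan is a warm-up: for each of the first nine processed frames it appends one extra one-third-size region that slides diagonally across the frame, so detection gets a look at the scene before any motion occurs. The window math reduces to this sketch (values for a hypothetical 1920x1080 detect resolution; `calculate_region` later clamps regions to the frame):

```python
frame_shape = (1080, 1920)  # (height, width)
for startup_scan_counter in range(9):
    ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
    ymax = int(frame_shape[0] / 3 + ymin)
    xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
    xmax = int(frame_shape[1] / 3 + xmin)
    print(startup_scan_counter, (xmin, ymin, xmax, ymax))
# 0 (0, 0, 640, 360)  ...  8 (1706, 960, 2346, 1320)
```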
        # resize regions and detect
        detections = []
        # seed with stationary objects
        detections = [
            (
                obj["label"],
                obj["score"],
                obj["box"],
                obj["area"],
                obj["region"],
            )
            for obj in object_tracker.tracked_objects.values()
            if obj["id"] in stationary_object_ids
        ]

        for region in regions:
            detections.extend(
                detect(
@@ -529,7 +600,7 @@ def process_frames(
        #########
        # merge objects, check for clipped objects and look again up to 4 times
        #########
        refining = True
        refining = len(regions) > 0
        refine_count = 0
        while refining and refine_count < 4:
            refining = False
@@ -556,7 +627,7 @@ def process_frames(
                    box = obj[2]
                    # calculate a new region that will hopefully get the entire object
                    region = calculate_region(
                        frame_shape, box[0], box[1], box[2], box[3]
                        frame_shape, box[0], box[1], box[2], box[3], region_min_size
                    )

                    regions.append(region)
@@ -582,14 +653,51 @@ def process_frames(
            if refining:
                refine_count += 1

        # Limit to the detections overlapping with motion areas
        # to avoid picking up stationary background objects
        detections_with_motion = [
            d for d in detections if intersects_any(d[2], motion_boxes)
        ]
        ## drop detections that overlap too much
        consolidated_detections = []

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections_with_motion)
        # if detection was run on this frame, consolidate
        if len(regions) > 0:
            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            # loop over detections grouped by label
            for group in detected_object_groups.values():
                # if the group only has 1 item, skip
                if len(group) == 1:
                    consolidated_detections.append(group[0])
                    continue

                # sort smallest to largest by area
                sorted_by_area = sorted(group, key=lambda g: g[3])

                for current_detection_idx in range(0, len(sorted_by_area)):
                    current_detection = sorted_by_area[current_detection_idx][2]
                    overlap = 0
                    for to_check_idx in range(
                        min(current_detection_idx + 1, len(sorted_by_area)),
                        len(sorted_by_area),
                    ):
                        to_check = sorted_by_area[to_check_idx][2]
                        # if 90% of smaller detection is inside of another detection, consolidate
                        if (
                            area(intersection(current_detection, to_check))
                            / area(current_detection)
                            > 0.9
                        ):
                            overlap = 1
                            break
                    if overlap == 0:
                        consolidated_detections.append(
                            sorted_by_area[current_detection_idx]
                        )
            # now that we have refined our detections, we need to track objects
            object_tracker.match_and_update(frame_time, consolidated_detections)
        # else, just update the frame times for the stationary objects
        else:
            object_tracker.update_frame_times(frame_time)

        # add to the queue if not full
        if detected_objects_queue.full():
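The 0.9 ratio above is a containment test: a detection is dropped when about 90% of its area lies inside a larger same-label detection, which removes the duplicates produced by overlapping regions. The core of it, sketched standalone with illustrative helpers:

```python
def intersection(a, b):
    # top-left / bottom-right of the overlap, or None when disjoint
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    return (x1, y1, x2, y2) if x1 < x2 and y1 < y2 else None


def area(box):
    return (box[2] - box[0]) * (box[3] - box[1])


def is_contained(small, large, threshold=0.9):
    overlap = intersection(small, large)
    return overlap is not None and area(overlap) / area(small) > threshold


# a person box almost entirely inside a larger duplicate detection
assert is_contained((110, 110, 200, 200), (100, 100, 210, 210))
assert not is_contained((0, 0, 50, 50), (100, 100, 210, 210))
```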
@@ -5,6 +5,10 @@ import time
import os
import signal

from frigate.util import (
    restart_frigate,
)

logger = logging.getLogger(__name__)


@@ -30,6 +34,6 @@ class FrigateWatchdog(threading.Thread):
                detector.start_or_restart()
            elif not detector.detect_process.is_alive():
                logger.info("Detection appears to have stopped. Exiting frigate...")
                os.kill(os.getpid(), signal.SIGTERM)
                restart_frigate()

        logger.info("Exiting watchdog...")

@@ -28,17 +28,19 @@ SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.create_model(Recordings)

    def add_index():
        # First add the index here, because there is a bug in peewee_migrate
        # when trying to create a multi-column index in the same migration
        # as the table: https://github.com/klen/peewee_migrate/issues/19
        Recordings.add_index("start_time", "end_time")
        Recordings.create_table()

    migrator.python(add_index)
    migrator.sql(
        'CREATE TABLE IF NOT EXISTS "recordings" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "camera" VARCHAR(20) NOT NULL, "path" VARCHAR(255) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "duration" REAL NOT NULL)'
    )
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "recordings_camera" ON "recordings" ("camera")'
    )
    migrator.sql(
        'CREATE UNIQUE INDEX IF NOT EXISTS "recordings_path" ON "recordings" ("path")'
    )
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "recordings_start_time_end_time" ON "recordings" (start_time, end_time)'
    )


def rollback(migrator, database, fake=False, **kwargs):
    migrator.remove_model(Recordings)
    pass

migrations/004_add_bbox_region_area.py (new file, 48 lines)
@@ -0,0 +1,48 @@
"""Peewee migrations -- 004_add_bbox_region_area.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['model_name'] # Return model in current state by name

    > migrator.sql(sql) # Run custom SQL
    > migrator.python(func, *args, **kwargs) # Run python code
    > migrator.create_model(Model) # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True) # Remove a model
    > migrator.add_fields(model, **fields) # Add fields to a model
    > migrator.change_fields(model, **fields) # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.drop_index(model, *col_names)
    > migrator.add_not_null(model, *field_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)

"""

import datetime as dt
import peewee as pw
from playhouse.sqlite_ext import *
from decimal import ROUND_HALF_EVEN
from frigate.models import Event

try:
    import playhouse.postgres_ext as pw_pext
except ImportError:
    pass

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.add_fields(
        Event,
        region=JSONField(default=[]),
        box=JSONField(default=[]),
        area=pw.IntegerField(default=0),
    )


def rollback(migrator, database, fake=False, **kwargs):
    migrator.remove_fields(Event, ["region", "box", "area"])
migrations/005_make_end_time_nullable.py (new file, 43 lines)
@@ -0,0 +1,43 @@
"""Peewee migrations -- 005_make_end_time_nullable.py.

Some examples (model - class or model name)::
    (same peewee_migrate example boilerplate as in 004_add_bbox_region_area.py)
"""

import datetime as dt
import peewee as pw
from playhouse.sqlite_ext import *
from decimal import ROUND_HALF_EVEN
from frigate.models import Event

try:
    import playhouse.postgres_ext as pw_pext
except ImportError:
    pass

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.drop_not_null(Event, "end_time")


def rollback(migrator, database, fake=False, **kwargs):
    pass
migrations/006_add_motion_active_objects.py (new file, 47 lines)
@@ -0,0 +1,47 @@
"""Peewee migrations -- 006_add_motion_active_objects.py.

Some examples (model - class or model name)::
    (same peewee_migrate example boilerplate as in 004_add_bbox_region_area.py)
"""

import datetime as dt
import peewee as pw
from playhouse.sqlite_ext import *
from decimal import ROUND_HALF_EVEN
from frigate.models import Recordings

try:
    import playhouse.postgres_ext as pw_pext
except ImportError:
    pass

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.add_fields(
        Recordings,
        objects=pw.IntegerField(null=True),
        motion=pw.IntegerField(null=True),
    )


def rollback(migrator, database, fake=False, **kwargs):
    migrator.remove_fields(Recordings, ["objects", "motion"])
@@ -1,23 +1,26 @@
|
||||
import datetime
|
||||
import sys
|
||||
from typing_extensions import runtime
|
||||
|
||||
sys.path.append("/lab/frigate")
|
||||
|
||||
import json
|
||||
import logging
|
||||
import multiprocessing as mp
|
||||
import os
|
||||
import subprocess as sp
|
||||
import sys
|
||||
from unittest import TestCase, main
|
||||
|
||||
import click
|
||||
import csv
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.edgetpu import LocalObjectDetector
|
||||
from frigate.motion import MotionDetector
|
||||
from frigate.object_processing import CameraState
|
||||
from frigate.objects import ObjectTracker
|
||||
from frigate.util import (
|
||||
DictFrameManager,
|
||||
EventsPerSecond,
|
||||
SharedMemoryFrameManager,
|
||||
draw_box_with_label,
|
||||
@@ -96,20 +99,22 @@ class ProcessClip:
         ffmpeg_process.wait()
         ffmpeg_process.communicate()
 
-    def process_frames(self, objects_to_track=["person"], object_filters={}):
+    def process_frames(
+        self, object_detector, objects_to_track=["person"], object_filters={}
+    ):
         mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
         mask[:] = 255
-        motion_detector = MotionDetector(
-            self.frame_shape, mask, self.camera_config.motion
-        )
+        motion_detector = MotionDetector(self.frame_shape, self.camera_config.motion)
         motion_detector.save_images = False
 
-        object_detector = LocalObjectDetector(labels="/labelmap.txt")
         object_tracker = ObjectTracker(self.camera_config.detect)
         process_info = {
             "process_fps": mp.Value("d", 0.0),
             "detection_fps": mp.Value("d", 0.0),
             "detection_frame": mp.Value("d", 0.0),
         }
 
         detection_enabled = mp.Value("d", 1)
         stop_event = mp.Event()
         model_shape = (self.config.model.height, self.config.model.width)
@@ -118,6 +123,7 @@ class ProcessClip:
             self.frame_queue,
             self.frame_shape,
             model_shape,
+            self.camera_config.detect,
             self.frame_manager,
             motion_detector,
             object_detector,
@@ -126,25 +132,16 @@ class ProcessClip:
             process_info,
             objects_to_track,
             object_filters,
-            mask,
             detection_enabled,
             stop_event,
             exit_on_empty=True,
         )
 
-    def top_object(self, debug_path=None):
-        obj_detected = False
-        top_computed_score = 0.0
-
-        def handle_event(name, obj, frame_time):
-            nonlocal obj_detected
-            nonlocal top_computed_score
-            if obj.computed_score > top_computed_score:
-                top_computed_score = obj.computed_score
-                if not obj.false_positive:
-                    obj_detected = True
-
-        self.camera_state.on("new", handle_event)
-        self.camera_state.on("update", handle_event)
+    def stats(self, debug_path=None):
+        total_regions = 0
+        total_motion_boxes = 0
+        object_ids = set()
+        total_frames = 0
 
         while not self.detected_objects_queue.empty():
             (
@@ -154,7 +151,8 @@ class ProcessClip:
                 motion_boxes,
                 regions,
             ) = self.detected_objects_queue.get()
-            if not debug_path is None:
+
+            if debug_path:
                 self.save_debug_frame(
                     debug_path, frame_time, current_tracked_objects.values()
                 )
@@ -162,10 +160,26 @@ class ProcessClip:
             self.camera_state.update(
                 frame_time, current_tracked_objects, motion_boxes, regions
             )
+            total_regions += len(regions)
+            total_motion_boxes += len(motion_boxes)
+            top_score = 0
+            for id, obj in self.camera_state.tracked_objects.items():
+                if not obj.false_positive:
+                    object_ids.add(id)
+                    if obj.top_score > top_score:
+                        top_score = obj.top_score
 
-        self.frame_manager.delete(self.camera_state.previous_frame_id)
-
-        return {"object_detected": obj_detected, "top_score": top_computed_score}
+            self.frame_manager.delete(self.camera_state.previous_frame_id)
+            total_frames += 1
+
+        return {
+            "total_regions": total_regions,
+            "total_motion_boxes": total_motion_boxes,
+            "true_positive_objects": len(object_ids),
+            "total_frames": total_frames,
+            "top_score": top_score,
+        }
 
     def save_debug_frame(self, debug_path, frame_time, tracked_objects):
         current_frame = cv2.cvtColor(
@@ -178,7 +192,6 @@ class ProcessClip:
         for obj in tracked_objects:
             thickness = 2
             color = (0, 0, 175)
-
             if obj["frame_time"] != frame_time:
                 thickness = 1
                 color = (255, 0, 0)
@@ -221,10 +234,9 @@ class ProcessClip:
 @click.command()
 @click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
 @click.option("-l", "--label", default="person", help="Label name to detect.")
-@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
-@click.option("-s", "--scores", default=None, help="File to save csv of top scores")
+@click.option("-o", "--output", default=None, help="File to save csv of data")
 @click.option("--debug-path", default=None, help="Path to output frames for debugging.")
-def process(path, label, threshold, scores, debug_path):
+def process(path, label, output, debug_path):
     clips = []
     if os.path.isdir(path):
         files = os.listdir(path)
@@ -235,51 +247,79 @@ def process(path, label, threshold, scores, debug_path):
 
     json_config = {
         "mqtt": {"host": "mqtt"},
         "detectors": {"coral": {"type": "edgetpu", "device": "usb"}},
         "cameras": {
             "camera": {
                 "ffmpeg": {
                     "inputs": [
                         {
                             "path": "path.mp4",
-                            "global_args": "",
-                            "input_args": "",
+                            "global_args": "-hide_banner",
+                            "input_args": "-loglevel info",
                             "roles": ["detect"],
                         }
                     ]
                 },
                 "height": 1920,
                 "width": 1080,
+                "rtmp": {"enabled": False},
+                "record": {"enabled": False},
             }
         },
     }
 
+    object_detector = LocalObjectDetector(labels="/labelmap.txt")
+
     results = []
     for c in clips:
         logger.info(c)
         frame_shape = get_frame_shape(c)
 
         json_config["cameras"]["camera"]["height"] = frame_shape[0]
         json_config["cameras"]["camera"]["width"] = frame_shape[1]
+        json_config["cameras"]["camera"]["detect"] = {
+            "height": frame_shape[0],
+            "width": frame_shape[1],
+        }
         json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = c
 
-        config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
+        frigate_config = FrigateConfig(**json_config)
+        runtime_config = frigate_config.runtime_config
+        runtime_config.cameras["camera"].create_ffmpeg_cmds()
 
-        process_clip = ProcessClip(c, frame_shape, config)
+        process_clip = ProcessClip(c, frame_shape, runtime_config)
         process_clip.load_frames()
-        process_clip.process_frames(objects_to_track=[label])
+        process_clip.process_frames(object_detector, objects_to_track=[label])
 
-        results.append((c, process_clip.top_object(debug_path)))
+        results.append((c, process_clip.stats(debug_path)))
 
-    if not scores is None:
-        with open(scores, "w") as writer:
-            for result in results:
-                writer.write(f"{result[0]},{result[1]['top_score']}\n")
-
-    positive_count = sum(1 for result in results if result[1]["object_detected"])
+    positive_count = sum(
+        1 for result in results if result[1]["true_positive_objects"] > 0
+    )
     print(
         f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
     )
 
+    if output:
+        # now we will open a file for writing
+        data_file = open(output, "w")
+
+        # create the csv writer object
+        csv_writer = csv.writer(data_file)
+
+        # Counter variable used for writing
+        # headers to the CSV file
+        count = 0
+
+        for result in results:
+            if count == 0:
+                # Writing headers of CSV file
+                header = ["file"] + list(result[1].keys())
+                csv_writer.writerow(header)
+                count += 1
+
+            # Writing data of CSV file
+            csv_writer.writerow([result[0]] + list(result[1].values()))
+
+        data_file.close()
 
 
 if __name__ == "__main__":
     process()
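Net effect of this file's changes: the `--threshold` and `--scores` flags are gone, `process_frames` takes a shared `LocalObjectDetector`, and `stats()` replaces `top_object()`. A hypothetical invocation (script path assumed) would be `python process_clip.py -p /path/to/clips -l person -o stats.csv --debug-path /tmp/debug`, writing one CSV row per clip with the `total_regions`, `total_motion_boxes`, `true_positive_objects`, `total_frames`, and `top_score` values returned by `stats()`.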
web/package-lock.json  (generated, 769 lines)
@@ -14,7 +14,7 @@
         "@cycjimmy/jsmpeg-player": "^5.0.1",
         "date-fns": "^2.21.3",
         "idb-keyval": "^5.0.2",
-        "immer": "^8.0.1",
+        "immer": "^9.0.6",
         "preact": "^10.5.9",
         "preact-async-route": "^2.2.1",
         "preact-router": "^3.2.1",
(binary image updated: 3.3 KiB → 3.9 KiB)
@@ -11,7 +11,8 @@
     <link rel="manifest" href="/site.webmanifest" />
     <link rel="mask-icon" href="/safari-pinned-tab.svg" color="#3b82f7" />
     <meta name="msapplication-TileColor" content="#3b82f7" />
-    <meta name="theme-color" content="#ff0000" />
+    <meta name="theme-color" content="#ffffff" media="(prefers-color-scheme: light)" />
+    <meta name="theme-color" content="#111827" media="(prefers-color-scheme: dark)" />
   </head>
   <body>
     <div id="root" class="z-0"></div>
@@ -13,7 +13,7 @@
       "type": "image/png"
     }
   ],
-  "theme_color": "#ff0000",
-  "background_color": "#ff0000",
+  "theme_color": "#ffffff",
+  "background_color": "#ffffff",
   "display": "standalone"
 }
@@ -61,7 +61,7 @@ export default function Sidebar() {
           <Separator />
         </Fragment>
       ) : null}
-      <Destination className="self-end" href="https://blakeblackshear.github.io/frigate" text="Documentation" />
+      <Destination className="self-end" href="https://docs.frigate.video" text="Documentation" />
       <Destination className="self-end" href="https://github.com/blakeblackshear/frigate" text="GitHub" />
     </NavigationDrawer>
   );
@@ -121,12 +121,12 @@ describe('MqttProvider', () => {
       </MqttProvider>
     );
     await screen.findByTestId('data');
-    expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
-    expect(screen.getByTestId('front/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
-    expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
-    expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
-    expect(screen.getByTestId('side/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
-    expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+    expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON","retain":true}');
+    expect(screen.getByTestId('front/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF","retain":true}');
+    expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON","retain":true}');
+    expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF","retain":true}');
+    expect(screen.getByTestId('side/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF","retain":true}');
+    expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF","retain":true}');
   });
 });
@@ -42,9 +42,9 @@ export function MqttProvider({
   useEffect(() => {
     Object.keys(config.cameras).forEach((camera) => {
       const { name, record, detect, snapshots } = config.cameras[camera];
-      dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF' });
-      dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF' });
-      dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF' });
+      dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF', retain: true });
+      dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF', retain: true });
+      dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF', retain: true });
     });
   }, [config]);
web/src/components/Calender.jsx  (new file, 329 lines)
@@ -0,0 +1,329 @@
import { h } from 'preact';
import { useEffect, useState, useCallback, useMemo, useRef } from 'preact/hooks';
import ArrowRight from '../icons/ArrowRight';
import ArrowRightDouble from '../icons/ArrowRightDouble';

const todayTimestamp = new Date().setHours(0, 0, 0, 0).valueOf();

const Calender = ({ onChange, calenderRef, close }) => {
  const keyRef = useRef([]);

  const date = new Date();
  const year = date.getFullYear();
  const month = date.getMonth();

  const daysMap = useMemo(() => ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], []);
  const monthMap = useMemo(
    () => [
      'January',
      'February',
      'March',
      'April',
      'May',
      'June',
      'July',
      'August',
      'September',
      'October',
      'November',
      'December',
    ],
    []
  );

  const [state, setState] = useState({
    getMonthDetails: [],
    year,
    month,
    selectedDay: null,
    timeRange: { before: null, after: null },
    monthDetails: null,
  });
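  // Month-length trick used by getNumberOfDays below: day 40 always overflows
  // into the following month, so new Date(year, month, 40).getDate() returns
  // how far past the end of `month` day 40 lands; 40 minus that is the
  // number of days in `month`.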
  const getNumberOfDays = useCallback((year, month) => {
    return 40 - new Date(year, month, 40).getDate();
  }, []);

  const getDayDetails = useCallback(
    (args) => {
      const date = args.index - args.firstDay;
      const day = args.index % 7;
      let prevMonth = args.month - 1;
      let prevYear = args.year;
      if (prevMonth < 0) {
        prevMonth = 11;
        prevYear--;
      }
      const prevMonthNumberOfDays = getNumberOfDays(prevYear, prevMonth);
      const _date = (date < 0 ? prevMonthNumberOfDays + date : date % args.numberOfDays) + 1;
      const month = date < 0 ? -1 : date >= args.numberOfDays ? 1 : 0;
      const timestamp = new Date(args.year, args.month, _date).getTime();
      return {
        date: _date,
        day,
        month,
        timestamp,
        dayString: daysMap[day],
      };
    },
    [getNumberOfDays, daysMap]
  );

  const getMonthDetails = useCallback(
    (year, month) => {
      const firstDay = new Date(year, month).getDay();
      const numberOfDays = getNumberOfDays(year, month);
      const monthArray = [];
      const rows = 6;
      let currentDay = null;
      let index = 0;
      const cols = 7;

      for (let row = 0; row < rows; row++) {
        for (let col = 0; col < cols; col++) {
          currentDay = getDayDetails({
            index,
            numberOfDays,
            firstDay,
            year,
            month,
          });
          monthArray.push(currentDay);
          index++;
        }
      }
      return monthArray;
    },
    [getNumberOfDays, getDayDetails]
  );

  useEffect(() => {
    setState((prev) => ({ ...prev, selectedDay: todayTimestamp, monthDetails: getMonthDetails(year, month) }));
  }, [year, month, getMonthDetails]);

  useEffect(() => {
    // add refs for keyboard navigation
    if (state.monthDetails) {
      keyRef.current = keyRef.current.slice(0, state.monthDetails.length);
    }
    // set today date in focus for keyboard navigation
    const todayDate = new Date(todayTimestamp).getDate();
    keyRef.current.find((t) => t.tabIndex === todayDate)?.focus();
  }, [state.monthDetails]);

  const isCurrentDay = (day) => day.timestamp === todayTimestamp;

  const isSelectedRange = useCallback(
    (day) => {
      if (!state.timeRange.after || !state.timeRange.before) return;

      return day.timestamp < state.timeRange.before && day.timestamp >= state.timeRange.after;
    },
    [state.timeRange]
  );

  const isFirstDayInRange = useCallback(
    (day) => {
      if (isCurrentDay(day)) return;
      return state.timeRange.after === day.timestamp;
    },
    [state.timeRange.after]
  );

  const isLastDayInRange = useCallback(
    (day) => {
      return state.timeRange.before === new Date(day.timestamp).setHours(24, 0, 0, 0);
    },
    [state.timeRange.before]
  );

  const getMonthStr = useCallback(
    (month) => {
      return monthMap[Math.max(Math.min(11, month), 0)] || 'Month';
    },
    [monthMap]
  );

  const onDateClick = (day) => {
    const { before, after } = state.timeRange;
    let timeRange = { before: null, after: null };

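    // Note: setHours(24, 0, 0, 0) rolls over to midnight of the *following*
    // day, so every `before` computed below is an exclusive end-of-day bound
    // for the clicked date.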
    // user has selected a date < after, reset values
    if (after === null || day.timestamp < after) {
      timeRange = { before: new Date(day.timestamp).setHours(24, 0, 0, 0), after: day.timestamp };
    }

    // user has selected a date > after
    if (after !== null && before !== new Date(day.timestamp).setHours(24, 0, 0, 0) && day.timestamp > after) {
      timeRange = {
        after,
        before:
          day.timestamp >= todayTimestamp
            ? new Date(todayTimestamp).setHours(24, 0, 0, 0)
            : new Date(day.timestamp).setHours(24, 0, 0, 0),
      };
    }

    // reset values
    if (before === new Date(day.timestamp).setHours(24, 0, 0, 0)) {
      timeRange = { before: null, after: null };
    }

    setState((prev) => ({
      ...prev,
      timeRange,
      selectedDay: day.timestamp,
    }));

    if (onChange) {
      onChange(timeRange.after ? { before: timeRange.before / 1000, after: timeRange.after / 1000 } : ['all']);
    }
  };

  const setYear = useCallback(
    (offset) => {
      const year = state.year + offset;
      const month = state.month;
      setState((prev) => {
        return {
          ...prev,
          year,
          monthDetails: getMonthDetails(year, month),
        };
      });
    },
    [state.year, state.month, getMonthDetails]
  );

  const setMonth = (offset) => {
    let year = state.year;
    let month = state.month + offset;
    if (month === -1) {
      month = 11;
      year--;
    } else if (month === 12) {
      month = 0;
      year++;
    }
    setState((prev) => {
      return {
        ...prev,
        year,
        month,
        monthDetails: getMonthDetails(year, month),
      };
    });
  };

  const handleKeydown = (e, day, index) => {
    if ((keyRef.current && e.key === 'Enter') || e.keyCode === 32) {
      e.preventDefault();
      day.month === 0 && onDateClick(day);
    }
    if (e.key === 'ArrowLeft') {
      index > 0 && keyRef.current[index - 1].focus();
    }
    if (e.key === 'ArrowRight') {
      index < 41 && keyRef.current[index + 1].focus();
    }
    if (e.key === 'ArrowUp') {
      e.preventDefault();
      index > 6 && keyRef.current[index - 7].focus();
    }
    if (e.key === 'ArrowDown') {
      e.preventDefault();
      index < 36 && keyRef.current[index + 7].focus();
    }
    if (e.key === 'Escape') {
      close();
    }
  };

  const renderCalendar = () => {
    const days =
      state.monthDetails &&
      state.monthDetails.map((day, idx) => {
        return (
          <div
            onClick={() => onDateClick(day)}
            onkeydown={(e) => handleKeydown(e, day, idx)}
            ref={(ref) => (keyRef.current[idx] = ref)}
            tabIndex={day.month === 0 ? day.date : null}
            className={`h-12 w-12 float-left flex flex-shrink justify-center items-center cursor-pointer ${
              day.month !== 0 ? ' opacity-50 bg-gray-700 dark:bg-gray-700 pointer-events-none' : ''
            }
            ${isFirstDayInRange(day) ? ' rounded-l-xl ' : ''}
            ${isSelectedRange(day) ? ' bg-blue-600 dark:hover:bg-blue-600' : ''}
            ${isLastDayInRange(day) ? ' rounded-r-xl ' : ''}
            ${isCurrentDay(day) && !isLastDayInRange(day) ? 'rounded-full bg-gray-100 dark:hover:bg-gray-100 ' : ''}`}
            key={idx}
          >
            <div className="font-light">
              <span className="text-gray-400">{day.date}</span>
            </div>
          </div>
        );
      });

    return (
      <div>
        <div className="w-full flex justify-start flex-shrink">
          {['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT'].map((d, i) => (
            <div key={i} className="w-12 text-xs font-light text-center">
              {d}
            </div>
          ))}
        </div>
        <div className="w-full h-56">{days}</div>
      </div>
    );
  };

  return (
    <div className="select-none w-96 flex flex-shrink" ref={calenderRef}>
      <div className="py-4 px-6">
        <div className="flex items-center">
          <div className="w-1/6 relative flex justify-around">
            <div
              tabIndex={100}
              className="flex justify-center items-center cursor-pointer absolute -mt-4 text-center rounded-full w-10 h-10 bg-gray-500 hover:bg-gray-200 dark:hover:bg-gray-800"
              onClick={() => setYear(-1)}
            >
              <ArrowRightDouble className="h-2/6 transform rotate-180 " />
            </div>
          </div>
          <div className="w-1/6 relative flex justify-around ">
            <div
              tabIndex={101}
              className="flex justify-center items-center cursor-pointer absolute -mt-4 text-center rounded-full w-10 h-10 bg-gray-500 hover:bg-gray-200 dark:hover:bg-gray-800"
              onClick={() => setMonth(-1)}
            >
              <ArrowRight className="h-2/6 transform rotate-180 red" />
            </div>
          </div>
          <div className="w-1/3">
            <div className="text-3xl text-center text-gray-200 font-extralight">{state.year}</div>
            <div className="text-center text-gray-400 font-extralight">{getMonthStr(state.month)}</div>
          </div>
          <div className="w-1/6 relative flex justify-around ">
            <div
              tabIndex={102}
              className="flex justify-center items-center cursor-pointer absolute -mt-4 text-center rounded-full w-10 h-10 bg-gray-500 hover:bg-gray-200 dark:hover:bg-gray-800"
              onClick={() => setMonth(1)}
            >
              <ArrowRight className="h-2/6" />
            </div>
          </div>
          <div className="w-1/6 relative flex justify-around " tabIndex={104} onClick={() => setYear(1)}>
            <div className="flex justify-center items-center cursor-pointer absolute -mt-4 text-center rounded-full w-10 h-10 bg-gray-500 hover:bg-gray-200 dark:hover:bg-gray-800">
              <ArrowRightDouble className="h-2/6" />
            </div>
          </div>
        </div>
        <div className="mt-3">{renderCalendar()}</div>
      </div>
    </div>
  );
};

export default Calender;
web/src/components/DatePicker.jsx  (new file, 162 lines)
@@ -0,0 +1,162 @@
import { h } from 'preact';
import { useCallback, useEffect, useState } from 'preact/hooks';

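// The before/after values below are unix epoch seconds (milliseconds / 1000);
// each `before` uses setHours(24, 0, 0, 0), i.e. midnight of the following
// day, as an exclusive upper bound.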
export const DateFilterOptions = [
  {
    label: 'All',
    value: ['all'],
  },
  {
    label: 'Today',
    value: {
      //Before
      before: new Date().setHours(24, 0, 0, 0) / 1000,
      //After
      after: new Date().setHours(0, 0, 0, 0) / 1000,
    },
  },
  {
    label: 'Yesterday',
    value: {
      //Before
      before: new Date(new Date().setDate(new Date().getDate() - 1)).setHours(24, 0, 0, 0) / 1000,
      //After
      after: new Date(new Date().setDate(new Date().getDate() - 1)).setHours(0, 0, 0, 0) / 1000,
    },
  },
  {
    label: 'Last 7 Days',
    value: {
      //Before
      before: new Date().setHours(24, 0, 0, 0) / 1000,
      //After
      after: new Date(new Date().setDate(new Date().getDate() - 7)).setHours(0, 0, 0, 0) / 1000,
    },
  },
  {
    label: 'This Month',
    value: {
      //Before
      before: new Date().setHours(24, 0, 0, 0) / 1000,
      //After
      after: new Date(new Date().getFullYear(), new Date().getMonth(), 1).getTime() / 1000,
    },
  },
  {
    label: 'Last Month',
    value: {
      //Before
      before: new Date(new Date().getFullYear(), new Date().getMonth(), 1).getTime() / 1000,
      //After
      after: new Date(new Date().getFullYear(), new Date().getMonth() - 1, 1).getTime() / 1000,
    },
  },
  {
    label: 'Custom Range',
    value: 'custom_range',
  },
];

export default function DatePicker({
  helpText,
  keyboardType = 'text',
  inputRef,
  label,
  leadingIcon: LeadingIcon,
  onBlur,
  onChangeText,
  onFocus,
  readonly,
  trailingIcon: TrailingIcon,
  value: propValue = '',
  ...props
}) {
  const [isFocused, setFocused] = useState(false);
  const [value, setValue] = useState(propValue);

  useEffect(() => {
    if (propValue !== value) {
      setValue(propValue);
    }
  }, [propValue, setValue, value]);

  const handleFocus = useCallback(
    (event) => {
      setFocused(true);
      onFocus && onFocus(event);
    },
    [onFocus]
  );

  const handleBlur = useCallback(
    (event) => {
      setFocused(false);
      onBlur && onBlur(event);
    },
    [onBlur]
  );

  const handleChange = useCallback(
    (event) => {
      const { value } = event.target;
      setValue(value);
      onChangeText && onChangeText(value);
    },
    [onChangeText, setValue]
  );

  const onClick = (e) => {
    props.onclick(e);
  };
  const labelMoved = isFocused || value !== '';

  return (
    <div className="w-full">
      {props.children}
      <div
        className={`bg-gray-100 dark:bg-gray-700 rounded rounded-b-none border-gray-400 border-b p-1 pl-4 pr-3 ${
          isFocused ? 'border-blue-500 dark:border-blue-500' : ''
        }`}
        ref={inputRef}
      >
        <label
          className="flex space-x-2 items-center"
          data-testid={`label-${label.toLowerCase().replace(/[^\w]+/g, '_')}`}
        >
          {LeadingIcon ? (
            <div className="w-10 h-full">
              <LeadingIcon />
            </div>
          ) : null}
          <div className="relative w-full">
            <input
              className="h-6 mt-6 w-full bg-transparent focus:outline-none focus:ring-0"
              type={keyboardType}
              readOnly
              onBlur={handleBlur}
              onFocus={handleFocus}
              onInput={handleChange}
              tabIndex="0"
              onClick={onClick}
              value={propValue}
              {...props}
            />
            <div
              className={`absolute top-3 transition transform text-gray-600 dark:text-gray-400 ${
                labelMoved ? 'text-xs -translate-y-2' : ''
              } ${isFocused ? 'text-blue-500 dark:text-blue-500' : ''}`}
            >
              <p>{label}</p>
            </div>
          </div>
          {TrailingIcon ? (
            <div className="w-10 h-10">
              <TrailingIcon />
            </div>
          ) : null}
        </label>
      </div>
      {helpText ? <div className="text-xs pl-3 pt-1">{helpText}</div> : null}
    </div>
  );
}
@@ -3,7 +3,7 @@ import { baseUrl } from '../api/baseUrl';
 import { useRef, useEffect } from 'preact/hooks';
 import JSMpeg from '@cycjimmy/jsmpeg-player';
 
-export default function JSMpegPlayer({ camera }) {
+export default function JSMpegPlayer({ camera, width, height }) {
   const playerRef = useRef();
   const url = `${baseUrl.replace(/^http/, 'ws')}/live/${camera}`
 
@@ -32,6 +32,6 @@ export default function JSMpegPlayer({ camera }) {
   }, [url]);
 
   return (
-    <div ref={playerRef} class="jsmpeg" />
+    <div ref={playerRef} class="jsmpeg" style={`max-height: ${height}px; max-width: ${width}px`} />
   );
 }
@@ -27,7 +27,7 @@ export default function RelativeModal({
 
   const handleKeydown = useCallback(
     (event) => {
-      const focusable = ref.current.querySelectorAll('[tabindex]');
+      const focusable = ref.current && ref.current.querySelectorAll('[tabindex]');
       if (event.key === 'Tab' && focusable.length) {
         if (event.shiftKey && document.activeElement === focusable[0]) {
           focusable[focusable.length - 1].focus();
 
@@ -69,14 +69,15 @@ export default function RelativeModal({
     let newTop = top;
     let newLeft = left;
 
-    // too far right
-    if (newLeft + width + WINDOW_PADDING >= windowWidth - WINDOW_PADDING) {
-      newLeft = windowWidth - width - WINDOW_PADDING;
-    }
     // too far left
-    else if (left < WINDOW_PADDING) {
+    if (left < WINDOW_PADDING) {
       newLeft = WINDOW_PADDING;
     }
+    // too far right
+    else if (newLeft + width + WINDOW_PADDING >= windowWidth - WINDOW_PADDING) {
+      newLeft = windowWidth - width - WINDOW_PADDING;
+    }
 
     // too close to bottom
     if (top + menuHeight > windowHeight - WINDOW_PADDING + window.scrollY) {
      newTop = WINDOW_PADDING;
@@ -3,74 +3,27 @@ import ArrowDropdown from '../icons/ArrowDropdown';
 import ArrowDropup from '../icons/ArrowDropup';
 import Menu, { MenuItem } from './Menu';
 import TextField from './TextField';
+import DatePicker from './DatePicker';
+import Calender from './Calender';
 import { useCallback, useEffect, useMemo, useRef, useState } from 'preact/hooks';
 
-export default function Select({ label, onChange, options: inputOptions = [], selected: propSelected }) {
+export default function Select({
+  type,
+  label,
+  onChange,
+  paramName,
+  options: inputOptions = [],
+  selected: propSelected,
+}) {
   const options = useMemo(
     () =>
       typeof inputOptions[0] === 'string' ? inputOptions.map((opt) => ({ value: opt, label: opt })) : inputOptions,
     [inputOptions]
   );
 
   const [showMenu, setShowMenu] = useState(false);
-  const [selected, setSelected] = useState(
-    Math.max(
-      options.findIndex(({ value }) => value === propSelected),
-      0
-    )
-  );
-  const [focused, setFocused] = useState(null);
-
-  const ref = useRef(null);
-
-  const handleSelect = useCallback(
-    (value, label) => {
-      setSelected(options.findIndex((opt) => opt.value === value));
-      onChange && onChange(value, label);
-      setShowMenu(false);
-    },
-    [onChange, options]
-  );
-
-  const handleClick = useCallback(() => {
-    setShowMenu(true);
-  }, [setShowMenu]);
-
-  const handleKeydown = useCallback(
-    (event) => {
-      switch (event.key) {
-        case 'Enter': {
-          if (!showMenu) {
-            setShowMenu(true);
-            setFocused(selected);
-          } else {
-            setSelected(focused);
-            onChange && onChange(options[focused].value, options[focused].label);
-            setShowMenu(false);
-          }
-          break;
-        }
-
-        case 'ArrowDown': {
-          const newIndex = focused + 1;
-          newIndex < options.length && setFocused(newIndex);
-          break;
-        }
-
-        case 'ArrowUp': {
-          const newIndex = focused - 1;
-          newIndex > -1 && setFocused(newIndex);
-          break;
-        }
-
-        // no default
-      }
-    },
-    [onChange, options, showMenu, setShowMenu, setFocused, focused, selected]
-  );
-
-  const handleDismiss = useCallback(() => {
-    setShowMenu(false);
-  }, [setShowMenu]);
+  const [selected, setSelected] = useState();
+  const [datePickerValue, setDatePickerValue] = useState();
 
   // Reset the state if the prop value changes
   useEffect(() => {
@@ -85,25 +38,219 @@ export default function Select({ label, onChange, options: inputOptions = [], se
     // DO NOT include `selected`
   }, [options, propSelected]); // eslint-disable-line react-hooks/exhaustive-deps
 
-  return (
-    <Fragment>
-      <TextField
-        inputRef={ref}
-        label={label}
-        onchange={onChange}
-        onclick={handleClick}
-        onkeydown={handleKeydown}
-        readonly
-        trailingIcon={showMenu ? ArrowDropup : ArrowDropdown}
-        value={options[selected]?.label}
-      />
-      {showMenu ? (
-        <Menu className="rounded-t-none" onDismiss={handleDismiss} relativeTo={ref} widthRelative>
-          {options.map(({ value, label }, i) => (
-            <MenuItem key={value} label={label} focus={focused === i} onSelect={handleSelect} value={value} />
-          ))}
-        </Menu>
-      ) : null}
-    </Fragment>
+  useEffect(() => {
+    if (type === 'datepicker') {
+      if ('after' in propSelected && 'before' in propSelected) {
+        if (!propSelected.before || !propSelected.after) return setDatePickerValue('all');
+
+        for (let i = 0; i < inputOptions.length; i++) {
+          if (
+            inputOptions[i].value &&
+            Object.entries(inputOptions[i].value).sort().toString() === Object.entries(propSelected).sort().toString()
+          ) {
+            setDatePickerValue(inputOptions[i]?.label);
+            break;
+          } else {
+            setDatePickerValue(
+              `${new Date(propSelected.after * 1000).toLocaleDateString()} -> ${new Date(
+                propSelected.before * 1000 - 1
+              ).toLocaleDateString()}`
+            );
+          }
+        }
+      }
+    }
+    if (type === 'dropdown') {
+      setSelected(
+        Math.max(
+          options.findIndex(({ value }) => Object.values(propSelected).includes(value)),
+          0
+        )
+      );
+    }
+  }, [type, options, inputOptions, propSelected, setSelected]);
+
+  const [focused, setFocused] = useState(null);
+  const [showCalender, setShowCalender] = useState(false);
+  const calenderRef = useRef(null);
+  const ref = useRef(null);
+
+  const handleSelect = useCallback(
+    (value) => {
+      setSelected(options.findIndex(({ value }) => Object.values(propSelected).includes(value)));
+      setShowMenu(false);
+
+      //show calender date range picker
+      if (value === 'custom_range') return setShowCalender(true);
+      onChange && onChange(value);
+    },
+    [onChange, options, propSelected, setSelected]
+  );
+
+  const handleDateRange = useCallback(
+    (range) => {
+      onChange && onChange(range);
+      setShowMenu(false);
+    },
+    [onChange]
+  );
+
+  const handleClick = useCallback(() => {
+    setShowMenu(true);
+  }, [setShowMenu]);
+
+  const handleKeydownDatePicker = useCallback(
+    (event) => {
+      switch (event.key) {
+        case 'Enter': {
+          if (!showMenu) {
+            setShowMenu(true);
+            setFocused(selected);
+          } else {
+            setSelected(focused);
+            if (options[focused].value === 'custom_range') {
+              setShowMenu(false);
+              return setShowCalender(true);
+            }
+
+            onChange && onChange(options[focused].value);
+            setShowMenu(false);
+          }
+          break;
+        }
+
+        case 'ArrowDown': {
+          event.preventDefault();
+          const newIndex = focused + 1;
+          newIndex < options.length && setFocused(newIndex);
+          break;
+        }
+
+        case 'ArrowUp': {
+          event.preventDefault();
+          const newIndex = focused - 1;
+          newIndex > -1 && setFocused(newIndex);
+          break;
+        }
+
+        // no default
+      }
+    },
+    [onChange, options, showMenu, setShowMenu, setFocused, focused, selected]
+  );
+
+  const handleKeydown = useCallback(
+    (event) => {
+      switch (event.key) {
+        case 'Enter': {
+          if (!showMenu) {
+            setShowMenu(true);
+            setFocused(selected);
+          } else {
+            setSelected(focused);
+            onChange && onChange({ [paramName]: options[focused].value });
+            setShowMenu(false);
+          }
+          break;
+        }
+
+        case 'ArrowDown': {
+          event.preventDefault();
+          const newIndex = focused + 1;
+          newIndex < options.length && setFocused(newIndex);
+          break;
+        }
+
+        case 'ArrowUp': {
+          event.preventDefault();
+          const newIndex = focused - 1;
+          newIndex > -1 && setFocused(newIndex);
+          break;
+        }
+
+        // no default
+      }
+    },
+    [onChange, options, showMenu, setShowMenu, setFocused, focused, selected, paramName]
+  );
+
+  const handleDismiss = useCallback(() => {
+    setShowMenu(false);
+  }, [setShowMenu]);
+
+  const findDOMNodes = (component) => {
+    return (component && (component.base || (component.nodeType === 1 && component))) || null;
+  };
+
+  useEffect(() => {
+    const addBackDrop = (e) => {
+      if (showCalender && !findDOMNodes(calenderRef.current).contains(e.target)) {
+        setShowCalender(false);
+      }
+    };
+    window.addEventListener('click', addBackDrop);
+
+    return function cleanup() {
+      window.removeEventListener('click', addBackDrop);
+    };
+  }, [showCalender]);
+
+  switch (type) {
+    case 'datepicker':
+      return (
+        <Fragment>
+          <DatePicker
+            inputRef={ref}
+            label={label}
+            onchange={onChange}
+            onclick={handleClick}
+            onkeydown={handleKeydownDatePicker}
+            trailingIcon={showMenu ? ArrowDropup : ArrowDropdown}
+            value={datePickerValue}
+          />
+          {showCalender && (
+            <Menu className="rounded-t-none" onDismiss={handleDismiss} relativeTo={ref}>
+              <Calender onChange={handleDateRange} calenderRef={calenderRef} close={() => setShowCalender(false)} />
+            </Menu>
+          )}
+          {showMenu ? (
+            <Menu className="rounded-t-none" onDismiss={handleDismiss} relativeTo={ref} widthRelative>
+              {options.map(({ value, label }, i) => (
+                <MenuItem key={value} label={label} focus={focused === i} onSelect={handleSelect} value={value} />
+              ))}
+            </Menu>
+          ) : null}
+        </Fragment>
+      );
+
+    // case 'dropdown':
+    default:
+      return (
+        <Fragment>
+          <TextField
+            inputRef={ref}
+            label={label}
+            onchange={onChange}
+            onclick={handleClick}
+            onkeydown={handleKeydown}
+            readonly
+            trailingIcon={showMenu ? ArrowDropup : ArrowDropdown}
+            value={options[selected]?.label}
+          />
+          {showMenu ? (
+            <Menu className="rounded-t-none" onDismiss={handleDismiss} relativeTo={ref} widthRelative>
+              {options.map(({ value, label }, i) => (
+                <MenuItem
+                  key={value}
+                  label={label}
+                  focus={focused === i}
+                  onSelect={handleSelect}
+                  value={{ [paramName]: value }}
+                />
+              ))}
+            </Menu>
+          ) : null}
+        </Fragment>
+      );
+  }
+}
@@ -5,21 +5,40 @@ import { fireEvent, render, screen } from '@testing-library/preact';
 describe('Select', () => {
   test('on focus, shows a menu', async () => {
     const handleChange = jest.fn();
-    render(<Select label="Tacos" onChange={handleChange} options={['tacos', 'burritos']} />);
+    render(
+      <Select
+        label="Tacos"
+        type="dropdown"
+        onChange={handleChange}
+        options={['all', 'tacos', 'burritos']}
+        paramName={['dinner']}
+        selected=""
+      />
+    );
 
     expect(screen.queryByRole('listbox')).not.toBeInTheDocument();
     fireEvent.click(screen.getByRole('textbox'));
     expect(screen.queryByRole('listbox')).toBeInTheDocument();
+    expect(screen.queryByRole('option', { name: 'all' })).toBeInTheDocument();
     expect(screen.queryByRole('option', { name: 'tacos' })).toBeInTheDocument();
     expect(screen.queryByRole('option', { name: 'burritos' })).toBeInTheDocument();
 
-    fireEvent.click(screen.queryByRole('option', { name: 'burritos' }));
-    expect(handleChange).toHaveBeenCalledWith('burritos', 'burritos');
+    fireEvent.click(screen.queryByRole('option', { name: 'tacos' }));
+    expect(handleChange).toHaveBeenCalledWith({ dinner: 'tacos' });
   });
 
   test('allows keyboard navigation', async () => {
     const handleChange = jest.fn();
-    render(<Select label="Tacos" onChange={handleChange} options={['tacos', 'burritos']} />);
+    render(
+      <Select
+        label="Tacos"
+        type="dropdown"
+        onChange={handleChange}
+        options={['tacos', 'burritos']}
+        paramName={['dinner']}
+        selected=""
+      />
+    );
 
     expect(screen.queryByRole('listbox')).not.toBeInTheDocument();
     const input = screen.getByRole('textbox');
@@ -29,6 +48,6 @@ describe('Select', () => {
 
     fireEvent.keyDown(input, { key: 'ArrowDown', code: 'ArrowDown' });
     fireEvent.keyDown(input, { key: 'Enter', code: 'Enter' });
-    expect(handleChange).toHaveBeenCalledWith('burritos', 'burritos');
+    expect(handleChange).toHaveBeenCalledWith({ dinner: 'burritos' });
   });
 });
@@ -18,7 +18,8 @@ export const useSearchString = (limit, searchParams) => {
   const removeDefaultSearchKeys = useCallback((searchParams) => {
     searchParams.delete('limit');
     searchParams.delete('include_thumbnails');
-    searchParams.delete('before');
+    // removed deletion of "before" as it's used by DatePicker
+    // searchParams.delete('before');
   }, []);
 
   return { searchString, setSearchString, removeDefaultSearchKeys };
web/src/icons/ArrowLeft.jsx  (new file, 18 lines)
@@ -0,0 +1,18 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function ArrowLeft({ className = '' }) {
  return (
    <svg
      className={`fill-current ${className}`}
      xmlns="http://www.w3.org/2000/svg"
      width="24"
      height="24"
      viewBox="0 0 24 24"
    >
      <path d="M12 0c-6.627 0-12 5.373-12 12s5.373 12 12 12 12-5.373 12-12-5.373-12-12-12zm-1.218 19l-1.782-1.75 5.25-5.25-5.25-5.25 1.782-1.75 6.968 7-6.968 7z" />
    </svg>
  );
}

export default memo(ArrowLeft);
web/src/icons/ArrowRight.jsx  (new file, 12 lines)
@@ -0,0 +1,12 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function ArrowRight({ className = '' }) {
  return (
    <svg className={`fill-current ${className}`} xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
      <path d="M5 3l3.057-3 11.943 12-11.943 12-3.057-3 9-9z" />
    </svg>
  );
}

export default memo(ArrowRight);
web/src/icons/ArrowRightDouble.jsx  (new file, 12 lines)
@@ -0,0 +1,12 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function ArrowRightDouble({ className = '' }) {
  return (
    <svg className={`fill-current ${className}`} xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
      <path d="M0 3.795l2.995-2.98 11.132 11.185-11.132 11.186-2.995-2.981 8.167-8.205-8.167-8.205zm18.04 8.205l-8.167 8.205 2.995 2.98 11.132-11.185-11.132-11.186-2.995 2.98 8.167 8.206z" />
    </svg>
  );
}

export default memo(ArrowRightDouble);
@@ -21,6 +21,7 @@ export default function Camera({ camera }) {
   const [viewMode, setViewMode] = useState('live');
 
   const cameraConfig = config?.cameras[camera];
+  const liveWidth = Math.round(cameraConfig.live.height * (cameraConfig.detect.width / cameraConfig.detect.height))
   const [options, setOptions] = usePersistence(`${camera}-feed`, emptyObject);
 
   const handleSetOption = useCallback(
@@ -87,7 +88,7 @@ export default function Camera({ camera }) {
     player = (
       <Fragment>
         <div>
-          <JSMpegPlayer camera={camera} />
+          <JSMpegPlayer camera={camera} width={liveWidth} height={cameraConfig.live.height} />
         </div>
       </Fragment>
     );
@@ -99,7 +99,7 @@ export default function Event({ eventId, close, scrollRef }) {
   }
 
   const startime = new Date(data.start_time * 1000);
-  const endtime = new Date(data.end_time * 1000);
+  const endtime = data.end_time ? new Date(data.end_time * 1000) : null;
   return (
     <div className="space-y-4">
       <div className="flex md:flex-row justify-between flex-wrap flex-col">
@@ -155,7 +155,7 @@ export default function Event({ eventId, close, scrollRef }) {
         <Tr index={1}>
           <Td>Timeframe</Td>
           <Td>
-            {startime.toLocaleString()} – {endtime.toLocaleString()}
+            {startime.toLocaleString()}{endtime !== null ? ` – ${endtime.toLocaleString()}` : ''}
           </Td>
         </Tr>
         <Tr>
@@ -186,7 +186,7 @@ export default function Event({ eventId, close, scrollRef }) {
           },
         ],
         poster: data.has_snapshot
-          ? `${apiHost}/clips/${data.camera}-${eventId}.jpg`
+          ? `${apiHost}/api/events/${eventId}/snapshot.jpg`
          : `data:image/jpeg;base64,${data.thumbnail}`,
       }}
       seekOptions={{ forward: 10, back: 5 }}
@@ -199,7 +199,7 @@ export default function Event({ eventId, close, scrollRef }) {
     <img
       src={
         data.has_snapshot
-          ? `${apiHost}/clips/${data.camera}-${eventId}.jpg`
+          ? `${apiHost}/api/events/${eventId}/snapshot.jpg`
          : `data:image/jpeg;base64,${data.thumbnail}`
       }
       alt={`${data.label} at ${(data.top_score * 100).toFixed(1)}% confidence`}
@@ -1,31 +1,26 @@
 import { h } from 'preact';
 import Select from '../../../components/Select';
-import { useCallback, useMemo } from 'preact/hooks';
+import { useCallback } from 'preact/hooks';
 
-const Filter = ({ onChange, searchParams, paramName, options }) => {
+function Filter({ onChange, searchParams, paramName, options, ...rest }) {
   const handleSelect = useCallback(
     (key) => {
       const newParams = new URLSearchParams(searchParams.toString());
-      if (key !== 'all') {
-        newParams.set(paramName, key);
-      } else {
-        newParams.delete(paramName);
-      }
+      Object.keys(key).map((entries) => {
+        if (key[entries] !== 'all') {
+          newParams.set(entries, key[entries]);
+        } else {
+          paramName.map((p) => newParams.delete(p));
+        }
+      });
 
       onChange(newParams);
     },
     [searchParams, paramName, onChange]
   );
 
-  const selectOptions = useMemo(() => ['all', ...options], [options]);
-
-  return (
-    <Select
-      label={`${paramName.charAt(0).toUpperCase()}${paramName.substr(1)}`}
-      onChange={handleSelect}
-      options={selectOptions}
-      selected={searchParams.get(paramName) || 'all'}
-    />
-  );
-};
+  const obj = {};
+  paramName.map((name) => Object.assign(obj, { [name]: searchParams.get(name) }), [searchParams]);
+  return <Select onChange={handleSelect} options={options} selected={obj} paramName={paramName} {...rest} />;
+}
 export default Filter;
@@ -3,7 +3,13 @@ import { useCallback, useMemo } from 'preact/hooks';
 import Link from '../../../components/Link';
 import { route } from 'preact-router';
 
-const Filterable = ({ onFilter, pathname, searchParams, paramName, name, removeDefaultSearchKeys }) => {
+function Filterable({ onFilter, pathname, searchParams, paramName, name }) {
+  const removeDefaultSearchKeys = useCallback((searchParams) => {
+    searchParams.delete('limit');
+    searchParams.delete('include_thumbnails');
+    // searchParams.delete('before');
+  }, []);
+
   const href = useMemo(() => {
     const params = new URLSearchParams(searchParams.toString());
     params.set(paramName, name);
@@ -27,6 +33,6 @@ const Filterable = ({ onFilter, pathname, searchParams, paramName, name, removeD
       {name}
     </Link>
   );
-};
+}
 
 export default Filterable;
@@ -1,11 +1,13 @@
 import { h } from 'preact';
 import Filter from './filter';
 import { useConfig } from '../../../api';
-import { useMemo } from 'preact/hooks';
+import { useMemo, useState } from 'preact/hooks';
+import { DateFilterOptions } from '../../../components/DatePicker';
+import Button from '../../../components/Button';
 
 const Filters = ({ onChange, searchParams }) => {
+  const [viewFilters, setViewFilters] = useState(false);
   const { data } = useConfig();
 
   const cameras = useMemo(() => Object.keys(data.cameras), [data]);
 
   const zones = useMemo(
@@ -27,12 +29,52 @@ const Filters = ({ onChange, searchParams }) => {
     }, data.objects?.track || [])
       .filter((value, i, self) => self.indexOf(value) === i);
   }, [data]);
 
   return (
-    <div className="flex space-x-4">
-      <Filter onChange={onChange} options={cameras} paramName="camera" searchParams={searchParams} />
-      <Filter onChange={onChange} options={zones} paramName="zone" searchParams={searchParams} />
-      <Filter onChange={onChange} options={labels} paramName="label" searchParams={searchParams} />
+    <div>
+      <Button
+        onClick={() => setViewFilters(!viewFilters)}
+        className="block xs:hidden w-full mb-4 text-center"
+        type="text"
+      >
+        {`${viewFilters ? 'Hide Filter' : 'Filter'}`}
+      </Button>
+      <div className={`xs:flex space-y-1 xs:space-y-0 xs:space-x-4 ${viewFilters ? 'flex-col' : 'hidden'}`}>
+        <Filter
+          type="dropdown"
+          onChange={onChange}
+          options={['all', ...cameras]}
+          paramName={['camera']}
+          label="Camera"
+          searchParams={searchParams}
+        />
+
+        <Filter
+          type="dropdown"
+          onChange={onChange}
+          options={['all', ...zones]}
+          paramName={['zone']}
+          label="Zone"
+          searchParams={searchParams}
+        />
+
+        <Filter
+          type="dropdown"
+          onChange={onChange}
+          options={['all', ...labels]}
+          paramName={['label']}
+          label="Label"
+          searchParams={searchParams}
+        />
+
+        <Filter
+          type="datepicker"
+          onChange={onChange}
+          options={DateFilterOptions}
+          paramName={['before', 'after']}
+          label="DatePicker"
+          searchParams={searchParams}
+        />
+      </div>
     </div>
   );
 };
@@ -42,7 +42,7 @@ const EventsRow = memo(
     );
 
     const start = new Date(parseInt(startTime * 1000, 10));
-    const end = new Date(parseInt(endTime * 1000, 10));
+    const end = endTime ? new Date(parseInt(endTime * 1000, 10)) : null;
 
     return (
       <Tbody reference={innerRef}>
@@ -102,7 +102,7 @@ const EventsRow = memo(
           </Td>
           <Td>{start.toLocaleDateString()}</Td>
           <Td>{start.toLocaleTimeString()}</Td>
-          <Td>{end.toLocaleTimeString()}</Td>
+          <Td>{end === null ? 'In progress' : end.toLocaleTimeString()}</Td>
         </Tr>
         {viewEvent === id ? (
           <Tr className="border-b-1">
@@ -13,7 +13,7 @@ describe('Camera Route', () => {
     mockSetOptions = jest.fn();
     mockUsePersistence = jest.spyOn(Context, 'usePersistence').mockImplementation(() => [{}, mockSetOptions]);
     jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
-      data: { cameras: { front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } } } },
+      data: { cameras: { front: { name: 'front', detect: { width: 1280, height: 720 }, live: { height: 720 }, objects: { track: ['taco', 'cat', 'dog'] } } } },
     }));
     jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://base-url.local:5000');
     jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
@@ -57,7 +57,6 @@ describe('Event Route', () => {
   const mockEvent = {
     camera: 'front',
-    end_time: 1613257337.841237,
     false_positive: false,
     has_clip: true,
     has_snapshot: true,
     id: '1613257326.237365-83cgl2',
 
@@ -71,7 +71,6 @@ describe('Events Route', () => {
 
   const mockEvents = new Array(12).fill(null).map((v, i) => ({
-    end_time: 1613257337 + i,
     false_positive: false,
     has_clip: true,
     has_snapshot: true,
     id: i,