Compare commits

..

1 Commits

Author SHA1 Message Date
Blake Blackshear
1d58e419f4 add release workflow for images 2023-10-28 06:34:15 -05:00
99 changed files with 13526 additions and 10039 deletions

View File

@@ -42,6 +42,7 @@
"extensions": [ "extensions": [
"ms-python.python", "ms-python.python",
"ms-python.vscode-pylance", "ms-python.vscode-pylance",
"ms-python.black-formatter",
"visualstudioexptteam.vscodeintellicode", "visualstudioexptteam.vscodeintellicode",
"mhutchie.git-graph", "mhutchie.git-graph",
"ms-azuretools.vscode-docker", "ms-azuretools.vscode-docker",
@@ -52,10 +53,13 @@
"csstools.postcss", "csstools.postcss",
"blanu.vscode-styled-jsx", "blanu.vscode-styled-jsx",
"bradlc.vscode-tailwindcss", "bradlc.vscode-tailwindcss",
"ms-python.isort",
"charliermarsh.ruff" "charliermarsh.ruff"
], ],
"settings": { "settings": {
"remote.autoForwardPorts": false, "remote.autoForwardPorts": false,
"python.linting.pylintEnabled": true,
"python.linting.enabled": true,
"python.formatting.provider": "none", "python.formatting.provider": "none",
"python.languageServer": "Pylance", "python.languageServer": "Pylance",
"editor.formatOnPaste": false, "editor.formatOnPaste": false,
@@ -68,7 +72,7 @@
"eslint.workingDirectories": ["./web"], "eslint.workingDirectories": ["./web"],
"isort.args": ["--settings-path=./pyproject.toml"], "isort.args": ["--settings-path=./pyproject.toml"],
"[python]": { "[python]": {
"editor.defaultFormatter": "charliermarsh.ruff", "editor.defaultFormatter": "ms-python.black-formatter",
"editor.formatOnSave": true, "editor.formatOnSave": true,
"editor.codeActionsOnSave": { "editor.codeActionsOnSave": {
"source.fixAll": true, "source.fixAll": true,

View File

@@ -18,12 +18,6 @@ updates:
interval: daily interval: daily
open-pull-requests-limit: 10 open-pull-requests-limit: 10
target-branch: dev target-branch: dev
- package-ecosystem: "pip"
directory: "/docker/tensorrt"
schedule:
interval: daily
open-pull-requests-limit: 10
target-branch: dev
- package-ecosystem: "npm" - package-ecosystem: "npm"
directory: "/web" directory: "/web"
schedule: schedule:

View File

@@ -79,15 +79,6 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push RockChip build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
jetson_jp4_build: jetson_jp4_build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: Jetson Jetpack 4 name: Jetson Jetpack 4
@@ -150,7 +141,7 @@ jobs:
- arm64_build - arm64_build
steps: steps:
- id: lowercaseRepo - id: lowercaseRepo
uses: ASzc/change-string-case-action@v6 uses: ASzc/change-string-case-action@v5
with: with:
string: ${{ github.repository }} string: ${{ github.repository }}
- name: Log in to the Container registry - name: Log in to the Container registry

View File

@@ -65,17 +65,20 @@ jobs:
- name: Check out the repository - name: Check out the repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Set up Python ${{ env.DEFAULT_PYTHON }} - name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.0.0 uses: actions/setup-python@v4.7.1
with: with:
python-version: ${{ env.DEFAULT_PYTHON }} python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements - name: Install requirements
run: | run: |
python3 -m pip install -U pip python3 -m pip install -U pip
python3 -m pip install -r docker/main/requirements-dev.txt python3 -m pip install -r docker/main/requirements-dev.txt
- name: Check formatting - name: Check black
run: | run: |
ruff format --check --diff frigate migrations docker *.py black --check --diff frigate migrations docker *.py
- name: Check lint - name: Check isort
run: |
isort --check --diff frigate migrations docker *.py
- name: Check ruff
run: | run: |
ruff check frigate migrations docker *.py ruff check frigate migrations docker *.py

View File

@@ -1,7 +1,6 @@
name: On release name: On release
on: on:
workflow_dispatch:
release: release:
types: [published] types: [published]
@@ -12,7 +11,7 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- id: lowercaseRepo - id: lowercaseRepo
uses: ASzc/change-string-case-action@v6 uses: ASzc/change-string-case-action@v5
with: with:
string: ${{ github.repository }} string: ${{ github.repository }}
- name: Log in to the Container registry - name: Log in to the Container registry
@@ -23,15 +22,41 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Create tag variables - name: Create tag variables
run: | run: |
BRANCH=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "master" || echo "dev")
echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
echo "BUILD_TAG=${BRANCH}-${GITHUB_SHA::7}" >> $GITHUB_ENV echo "BUILD_TAG=${{ github.ref_name }}-${GITHUB_SHA::7}" >> $GITHUB_ENV
echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
- name: Tag and push the main image - name: Tag and push the main image
run: | run: |
VERSION_TAG=${BASE}:${CLEAN_VERSION} VERSION_TAG=${BASE}:${CLEAN_VERSION}
PULL_TAG=${BASE}:${BUILD_TAG} PULL_TAG=${BASE}:${BUILD_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG} docker pull ${PULL_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do docker tag ${PULL_TAG} ${VERSION_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant} docker push ${VERSION_TAG}
done - name: Tag and push standard arm64
run: |
VERSION_TAG=${BASE}:${CLEAN_VERSION}-standard-arm64
PULL_TAG=${BASE}:${BUILD_TAG}-standard-arm64
docker pull ${PULL_TAG}
docker tag ${PULL_TAG} ${VERSION_TAG}
docker push ${VERSION_TAG}
- name: Tag and push tensorrt
run: |
VERSION_TAG=${BASE}:${CLEAN_VERSION}-tensorrt
PULL_TAG=${BASE}:${BUILD_TAG}-tensorrt
docker pull ${PULL_TAG}
docker tag ${PULL_TAG} ${VERSION_TAG}
docker push ${VERSION_TAG}
- name: Tag and push tensorrt-jp4
run: |
VERSION_TAG=${BASE}:${CLEAN_VERSION}-tensorrt-jp4
PULL_TAG=${BASE}:${BUILD_TAG}-tensorrt-jp4
docker pull ${PULL_TAG}
docker tag ${PULL_TAG} ${VERSION_TAG}
docker push ${VERSION_TAG}
- name: Tag and push tensorrt-jp5
run: |
VERSION_TAG=${BASE}:${CLEAN_VERSION}-tensorrt-jp5
PULL_TAG=${BASE}:${BUILD_TAG}-tensorrt-jp5
docker pull ${PULL_TAG}
docker tag ${PULL_TAG} ${VERSION_TAG}
docker push ${VERSION_TAG}

View File

@@ -2,5 +2,3 @@
/docker/tensorrt/ @madsciencetist @NateMeyer /docker/tensorrt/ @madsciencetist @NateMeyer
/docker/tensorrt/*arm64* @madsciencetist /docker/tensorrt/*arm64* @madsciencetist
/docker/tensorrt/*jetson* @madsciencetist /docker/tensorrt/*jetson* @madsciencetist
/docker/rockchip/ @MarcA711

View File

@@ -14,14 +14,13 @@ services:
dockerfile: docker/main/Dockerfile dockerfile: docker/main/Dockerfile
# Use target devcontainer-trt for TensorRT dev # Use target devcontainer-trt for TensorRT dev
target: devcontainer target: devcontainer
## Uncomment this block for nvidia gpu support deploy:
# deploy: resources:
# resources: reservations:
# reservations: devices:
# devices: - driver: nvidia
# - driver: nvidia count: 1
# count: 1 capabilities: [gpu]
# capabilities: [gpu]
environment: environment:
YOLO_MODELS: yolov7-320 YOLO_MODELS: yolov7-320
devices: devices:

View File

@@ -33,7 +33,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc FROM scratch AS go2rtc
ARG TARGETARCH ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.8.4/go2rtc_linux_${TARGETARCH}" go2rtc ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.8.1/go2rtc_linux_${TARGETARCH}" go2rtc
#### ####
@@ -215,13 +215,13 @@ COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
RUN mkdir -p /opt/frigate \ RUN mkdir -p /opt/frigate \
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate && ln -svf /workspace/frigate/frigate /opt/frigate/frigate
# Install Node 20 # Install Node 16
RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \ RUN apt-get update \
chmod 500 nsolid_setup_deb.sh && \ && apt-get install wget -y \
./nsolid_setup_deb.sh 20 && \ && wget -qO- https://deb.nodesource.com/setup_16.x | bash - \
apt-get install nodejs -y \ && apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/* \ && rm -rf /var/lib/apt/lists/* \
&& npm install -g npm@10 && npm install -g npm@9
WORKDIR /workspace/frigate WORKDIR /workspace/frigate

View File

@@ -2,7 +2,7 @@
set -euxo pipefail set -euxo pipefail
NGINX_VERSION="1.25.3" NGINX_VERSION="1.25.2"
VOD_MODULE_VERSION="1.31" VOD_MODULE_VERSION="1.31"
SECURE_TOKEN_MODULE_VERSION="1.5" SECURE_TOKEN_MODULE_VERSION="1.5"
RTMP_MODULE_VERSION="1.2.2" RTMP_MODULE_VERSION="1.2.2"

View File

@@ -1 +1,3 @@
black == 23.10.*
isort
ruff ruff

View File

@@ -13,9 +13,9 @@ psutil == 5.9.*
pydantic == 1.10.* pydantic == 1.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml git+https://github.com/fbcotter/py3nvml#egg=py3nvml
PyYAML == 6.0.* PyYAML == 6.0.*
pytz == 2023.3.post1 pytz == 2023.3
ruamel.yaml == 0.18.* ruamel.yaml == 0.17.*
tzlocal == 5.2 tzlocal == 5.1
types-PyYAML == 6.0.* types-PyYAML == 6.0.*
requests == 2.31.* requests == 2.31.*
types-requests == 2.31.* types-requests == 2.31.*
@@ -23,7 +23,6 @@ scipy == 1.11.*
norfair == 2.2.* norfair == 2.2.*
setproctitle == 1.3.* setproctitle == 1.3.*
ws4py == 0.5.* ws4py == 0.5.*
unidecode == 1.3.*
# Openvino Library - Custom built with MYRIAD support # Openvino Library - Custom built with MYRIAD support
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'

View File

@@ -45,13 +45,8 @@ function get_ip_and_port_from_supervisor() {
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Removing stale config from last run..."
rm /dev/shm/go2rtc.yaml
fi
if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Preparing new go2rtc config..." echo "[INFO] Preparing go2rtc config..."
if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
# Running as a Home Assistant add-on, infer the IP address and port # Running as a Home Assistant add-on, infer the IP address and port
@@ -59,8 +54,6 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
fi fi
python3 /usr/local/go2rtc/create_config.py python3 /usr/local/go2rtc/create_config.py
else
echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi fi
readonly config_path="/config" readonly config_path="/config"

View File

@@ -3,7 +3,6 @@
import json import json
import os import os
import sys import sys
from pathlib import Path
import yaml import yaml
@@ -17,14 +16,6 @@ sys.path.remove("/opt/frigate")
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
if os.path.isdir("/run/secrets"):
for secret_file in os.listdir("/run/secrets"):
if secret_file.startswith("FRIGATE_"):
FRIGATE_ENV_VARS[secret_file] = Path(
os.path.join("/run/secrets", secret_file)
).read_text()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml # Check if we can use .yaml instead of .yml
@@ -58,15 +49,7 @@ if go2rtc_config.get("log") is None:
elif go2rtc_config["log"].get("format") is None: elif go2rtc_config["log"].get("format") is None:
go2rtc_config["log"]["format"] = "text" go2rtc_config["log"]["format"] = "text"
# ensure there is a default webrtc config if not go2rtc_config.get("webrtc", {}).get("candidates", []):
if not go2rtc_config.get("webrtc"):
go2rtc_config["webrtc"] = {}
# go2rtc should listen on 8555 tcp & udp by default
if not go2rtc_config["webrtc"].get("listen"):
go2rtc_config["webrtc"]["listen"] = ":8555"
if not go2rtc_config["webrtc"].get("candidates", []):
default_candidates = [] default_candidates = []
# use internal candidate if it was discovered when running through the add-on # use internal candidate if it was discovered when running through the add-on
internal_candidate = os.environ.get( internal_candidate = os.environ.get(
@@ -113,20 +96,6 @@ if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
"rtsp" "rtsp"
] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
# add hardware acceleration presets for rockchip devices
# may be removed if frigate uses a go2rtc version that includes these presets
if go2rtc_config.get("ffmpeg") is None:
go2rtc_config["ffmpeg"] = {
"h264/rk": "-c:v h264_rkmpp_encoder -g 50 -bf 0",
"h265/rk": "-c:v hevc_rkmpp_encoder -g 50 -bf 0",
}
else:
if go2rtc_config["ffmpeg"].get("h264/rk") is None:
go2rtc_config["ffmpeg"]["h264/rk"] = "-c:v h264_rkmpp_encoder -g 50 -bf 0"
if go2rtc_config["ffmpeg"].get("h265/rk") is None:
go2rtc_config["ffmpeg"]["h265/rk"] = "-c:v hevc_rkmpp_encoder -g 50 -bf 0"
for name in go2rtc_config.get("streams", {}): for name in go2rtc_config.get("streams", {}):
stream = go2rtc_config["streams"][name] stream = go2rtc_config["streams"][name]

View File

@@ -32,13 +32,6 @@ http {
gzip_proxied no-cache no-store private expired auth; gzip_proxied no-cache no-store private expired auth;
gzip_vary on; gzip_vary on;
proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off;
map $sent_http_content_type $should_not_cache {
'application/json' 0;
default 1;
}
upstream frigate_api { upstream frigate_api {
server 127.0.0.1:5001; server 127.0.0.1:5001;
keepalive 1024; keepalive 1024;
@@ -164,47 +157,19 @@ http {
include proxy.conf; include proxy.conf;
} }
# frigate lovelace card uses this path location /live/mse/ {
location /live/mse/api/ws { proxy_pass http://go2rtc/;
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf; include proxy.conf;
} }
location /live/webrtc/api/ws { location /live/webrtc/ {
limit_except GET { proxy_pass http://go2rtc/;
deny all;
}
proxy_pass http://go2rtc/api/ws;
include proxy.conf; include proxy.conf;
} }
# pass through go2rtc player location ~* /api/go2rtc([/]?.*)$ {
location /live/webrtc/webrtc.html { proxy_pass http://go2rtc;
limit_except GET { rewrite ^/api/go2rtc(.*)$ /api$1 break;
deny all;
}
proxy_pass http://go2rtc/webrtc.html;
include proxy.conf;
}
# frontend uses this to fetch the version
location /api/go2rtc/api {
limit_except GET {
deny all;
}
proxy_pass http://go2rtc/api;
include proxy.conf;
}
# integration uses this to add webrtc candidate
location /api/go2rtc/webrtc {
limit_except POST {
deny all;
}
proxy_pass http://go2rtc/api/webrtc;
include proxy.conf; include proxy.conf;
} }
@@ -220,20 +185,6 @@ http {
proxy_pass http://frigate_api/; proxy_pass http://frigate_api/;
include proxy.conf; include proxy.conf;
proxy_cache api_cache;
proxy_cache_lock on;
proxy_cache_use_stale updating;
proxy_cache_valid 200 5s;
proxy_cache_bypass $http_x_cache_bypass;
proxy_no_cache $should_not_cache;
add_header X-Cache-Status $upstream_cache_status;
location /api/vod/ {
proxy_pass http://frigate_api/vod/;
include proxy.conf;
proxy_cache off;
}
location /api/stats { location /api/stats {
access_log off; access_log off;
rewrite ^/api/(.*)$ $1 break; rewrite ^/api/(.*)$ $1 break;

View File

@@ -1,32 +0,0 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
FROM wheels as rk-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
FROM deps AS rk-deps
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install -U /deps/rk-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/
ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/
ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/
ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/
ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/
ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffprobe /usr/lib/btbn-ffmpeg/bin/

View File

@@ -1,2 +0,0 @@
hide-warnings == 0.17
rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v1.5.2/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl

View File

@@ -1,34 +0,0 @@
target wget {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "wget"
}
target wheels {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "wheels"
}
target deps {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "deps"
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64"]
target = "rootfs"
}
target rk {
dockerfile = "docker/rockchip/Dockerfile"
contexts = {
wget = "target:wget",
wheels = "target:wheels",
deps = "target:deps",
rootfs = "target:rootfs"
}
platforms = ["linux/arm64"]
}

View File

@@ -1,10 +0,0 @@
BOARDS += rk
local-rk: version
docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk
build-rk: version
docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
push-rk: build-rk
docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk

View File

@@ -120,7 +120,7 @@ NOTE: The folder that is mapped from the host needs to be the folder that contai
## Custom go2rtc version ## Custom go2rtc version
Frigate currently includes go2rtc v1.8.4, there may be certain cases where you want to run a different version of go2rtc. Frigate currently includes go2rtc v1.8.1, there may be certain cases where you want to run a different version of go2rtc.
To do this: To do this:

View File

@@ -31,7 +31,7 @@ First, set up a PTZ preset in your camera's firmware and give it a name. If you'
Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset. Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.
An [ONVIF connection](cameras.md) is required for autotracking to function. Also, a [motion mask](masks.md) over your camera's timestamp and any overlay text is recommended to ensure they are completely excluded from scene change calculations when the camera is moving. An [ONVIF connection](cameras.md) is required for autotracking to function.
Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT. Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT.
@@ -113,7 +113,7 @@ If you initially calibrate with zooming disabled and then enable zooming at a la
Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR. Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR.
The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases 5 fps is sufficient, but if you plan to track faster moving objects, you may want to increase this slightly. Higher frame rates (> 10fps) will only slow down Frigate and the motion estimator and may lead to dropped frames, especially if you are using experimental zooming. The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases (especially for faster moving objects), the default 5 fps is insufficient for the motion estimator to perform accurately. 10 fps is the current recommendation. Higher frame rates will likely not be more performant and will only slow down Frigate and the motion estimator. Adjust your camera to output at least 10 frames per second and change the `fps` parameter in the [detect configuration](index.md) of your configuration file.
A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. You can watch Frigate's debug viewer for your camera to see a thicker colored box around the object currently being autotracked. A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. You can watch Frigate's debug viewer for your camera to see a thicker colored box around the object currently being autotracked.

View File

@@ -127,20 +127,6 @@ cameras:
- detect - detect
``` ```
#### Reolink Doorbell
The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
```yaml
go2rtc:
streams:
your_reolink_doorbell:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
- rtsp://reolink_ip/Preview_01_sub
your_reolink_doorbell_sub:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
```
### Unifi Protect Cameras ### Unifi Protect Cameras
Unifi protect cameras require the rtspx stream to be used with go2rtc. Unifi protect cameras require the rtspx stream to be used with go2rtc.
@@ -154,7 +140,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk - rtspx://192.168.1.1:7441/abcdefghijk
``` ```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp) [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#source-rtsp)
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect. In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect.

View File

@@ -90,7 +90,6 @@ This list of working and non-working PTZ cameras is based on user feedback.
| Reolink 511WA | ✅ | ❌ | Zoom only | | Reolink 511WA | ✅ | ❌ | Zoom only |
| Reolink E1 Pro | ✅ | ❌ | | | Reolink E1 Pro | ✅ | ❌ | |
| Reolink E1 Zoom | ✅ | ❌ | | | Reolink E1 Zoom | ✅ | ❌ | |
| Reolink RLC-823A 16x | ✅ | ❌ | |
| Sunba 405-D20X | ✅ | ❌ | | | Sunba 405-D20X | ✅ | ❌ | |
| Tapo C200 | ✅ | ❌ | Incomplete ONVIF support | | Tapo C200 | ✅ | ❌ | Incomplete ONVIF support |
| Tapo C210 | ❌ | ❌ | Incomplete ONVIF support | | Tapo C210 | ❌ | ❌ | Incomplete ONVIF support |

View File

@@ -13,8 +13,8 @@ See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on
| Preset | Usage | Other Notes | | Preset | Usage | Other Notes |
| --------------------- | ------------------------------ | ----------------------------------------------------- | | --------------------- | ------------------------------ | ----------------------------------------------------- |
| preset-rpi-32-h264 | 32 bit Rpi with h264 stream | |
| preset-rpi-64-h264 | 64 bit Rpi with h264 stream | | | preset-rpi-64-h264 | 64 bit Rpi with h264 stream | |
| preset-rpi-64-h265 | 64 bit Rpi with h265 stream | |
| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | | preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen |
| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | | preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead |
| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | | preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead |
@@ -23,8 +23,6 @@ See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on
| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | | preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 |
| preset-jetson-h264 | Nvidia Jetson with h264 stream | | | preset-jetson-h264 | Nvidia Jetson with h264 stream | |
| preset-jetson-h265 | Nvidia Jetson with h265 stream | | | preset-jetson-h265 | Nvidia Jetson with h265 stream | |
| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode |
| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with *-rk suffix and privileged mode |
### Input Args Presets ### Input Args Presets

View File

@@ -3,8 +3,6 @@ id: hardware_acceleration
title: Hardware Acceleration title: Hardware Acceleration
--- ---
# Hardware Acceleration
It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
# Officially Supported # Officially Supported
@@ -15,13 +13,8 @@ Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config
**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. **NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration.
```yaml ```yaml
# if you want to decode a h264 stream
ffmpeg: ffmpeg:
hwaccel_args: preset-rpi-64-h264 hwaccel_args: preset-rpi-64-h264
# if you want to decode a h265 (hevc) stream
ffmpeg:
hwaccel_args: preset-rpi-64-h265
``` ```
:::note :::note
@@ -49,11 +42,7 @@ ffmpeg:
hwaccel_args: preset-vaapi hwaccel_args: preset-vaapi
``` ```
:::note **NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
:::
### Via Quicksync (>=10th Generation only) ### Via Quicksync (>=10th Generation only)
@@ -141,11 +130,7 @@ Depending on your OS and kernel configuration, you may need to change the `/proc
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
:::note **Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container.
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
:::
```yaml ```yaml
ffmpeg: ffmpeg:
@@ -261,7 +246,7 @@ These instructions were originally based on the [Jellyfin documentation](https:/
# Community Supported # Community Supported
## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano) ## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano*, Xavier AGX, Xavier NX, TX2, TX1, Nano)
A separate set of docker images is available that is based on Jetpack/L4T. They comes with an `ffmpeg` build A separate set of docker images is available that is based on Jetpack/L4T. They comes with an `ffmpeg` build
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the
@@ -334,57 +319,3 @@ ffmpeg:
If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption. If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption.
Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show
that NVDEC/NVDEC1 are in use. that NVDEC/NVDEC1 are in use.
## Rockchip platform
Hardware accelerated video de-/encoding is supported on all Rockchip SoCs.
### Setup
Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
### Configuration
Add one of the following ffmpeg presets to your `config.yaml` to enable hardware acceleration:
```yaml
# if you try to decode a h264 encoded stream
ffmpeg:
hwaccel_args: preset-rk-h264
# if you try to decode a h265 (hevc) encoded stream
ffmpeg:
hwaccel_args: preset-rk-h265
```
:::note
Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
:::
### go2rtc presets for hardware accelerated transcoding
If your input stream is to be transcoded using hardware acceleration, there are these presets for go2rtc: `h264/rk` and `h265/rk`. You can use them this way:
```
go2rtc:
streams:
Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264/rk
Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265/rk
```
:::warning
The go2rtc docs may suggest the following configuration:
```
go2rtc:
streams:
Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264#hardware=rk
Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265#hardware=rk
```
However, this does not currently work.
:::

View File

@@ -25,9 +25,22 @@ cameras:
VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon. VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon.
### Environment Variable Substitution ### Full configuration reference:
Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the configuration reference below. For example, the following values can be replaced at runtime by using environment variables: :::caution
It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions.
:::
**Note:** The following values will be replaced at runtime by using environment variables
- `{FRIGATE_MQTT_USER}`
- `{FRIGATE_MQTT_PASSWORD}`
- `{FRIGATE_RTSP_USER}`
- `{FRIGATE_RTSP_PASSWORD}`
for example:
```yaml ```yaml
mqtt: mqtt:
@@ -47,14 +60,6 @@ onvif:
password: "{FRIGATE_RTSP_PASSWORD}" password: "{FRIGATE_RTSP_PASSWORD}"
``` ```
### Full configuration reference:
:::caution
It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions.
:::
```yaml ```yaml
mqtt: mqtt:
# Optional: Enable mqtt server (default: shown below) # Optional: Enable mqtt server (default: shown below)
@@ -70,11 +75,11 @@ mqtt:
# NOTE: must be unique if you are running multiple instances # NOTE: must be unique if you are running multiple instances
client_id: frigate client_id: frigate
# Optional: user # Optional: user
# NOTE: MQTT user can be specified with an environment variables or docker secrets that must begin with 'FRIGATE_'. # NOTE: MQTT user can be specified with an environment variables that must begin with 'FRIGATE_'.
# e.g. user: '{FRIGATE_MQTT_USER}' # e.g. user: '{FRIGATE_MQTT_USER}'
user: mqtt_user user: mqtt_user
# Optional: password # Optional: password
# NOTE: MQTT password can be specified with an environment variables or docker secrets that must begin with 'FRIGATE_'. # NOTE: MQTT password can be specified with an environment variables that must begin with 'FRIGATE_'.
# e.g. password: '{FRIGATE_MQTT_PASSWORD}' # e.g. password: '{FRIGATE_MQTT_PASSWORD}'
password: password password: password
# Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None) # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None)
@@ -217,17 +222,15 @@ ffmpeg:
# Optional: Detect configuration # Optional: Detect configuration
# NOTE: Can be overridden at the camera level # NOTE: Can be overridden at the camera level
detect: detect:
# Optional: width of the frame for the input with the detect role (default: use native stream resolution) # Optional: width of the frame for the input with the detect role (default: shown below)
width: 1280 width: 1280
# Optional: height of the frame for the input with the detect role (default: use native stream resolution) # Optional: height of the frame for the input with the detect role (default: shown below)
height: 720 height: 720
# Optional: desired fps for your camera for the input with the detect role (default: shown below) # Optional: desired fps for your camera for the input with the detect role (default: shown below)
# NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
fps: 5 fps: 5
# Optional: enables detection for the camera (default: True) # Optional: enables detection for the camera (default: True)
enabled: True enabled: True
# Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate)
min_initialized: 2
# Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
max_disappeared: 25 max_disappeared: 25
# Optional: Configuration for stationary object tracking # Optional: Configuration for stationary object tracking
@@ -345,8 +348,8 @@ record:
# Optional: Number of minutes to wait between cleanup runs (default: shown below) # Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
expire_interval: 60 expire_interval: 60
# Optional: Sync recordings with disk on startup and once a day (default: shown below). # Optional: Sync recordings with disk on startup (default: shown below).
sync_recordings: False sync_on_startup: False
# Optional: Retention settings for recording # Optional: Retention settings for recording
retain: retain:
# Optional: Number of days to retain recordings regardless of events (default: shown below) # Optional: Number of days to retain recordings regardless of events (default: shown below)
@@ -408,7 +411,7 @@ snapshots:
# Optional: print a timestamp on the snapshots (default: shown below) # Optional: print a timestamp on the snapshots (default: shown below)
timestamp: False timestamp: False
# Optional: draw bounding box on the snapshots (default: shown below) # Optional: draw bounding box on the snapshots (default: shown below)
bounding_box: True bounding_box: False
# Optional: crop the snapshot (default: shown below) # Optional: crop the snapshot (default: shown below)
crop: False crop: False
# Optional: height to resize the snapshot to (default: original size) # Optional: height to resize the snapshot to (default: original size)
@@ -433,7 +436,7 @@ rtmp:
enabled: False enabled: False
# Optional: Restream configuration # Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.8.3) # Uses https://github.com/AlexxIT/go2rtc (v1.8.1)
go2rtc: go2rtc:
# Optional: jsmpeg stream configuration for WebUI # Optional: jsmpeg stream configuration for WebUI
@@ -486,7 +489,7 @@ cameras:
# Required: A list of input streams for the camera. See documentation for more information. # Required: A list of input streams for the camera. See documentation for more information.
inputs: inputs:
# Required: the path to the stream # Required: the path to the stream
# NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {} # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
# Required: list of roles for this stream. valid values are: audio,detect,record,rtmp # Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
# NOTICE: In addition to assigning the audio, record, and rtmp roles, # NOTICE: In addition to assigning the audio, record, and rtmp roles,
@@ -515,9 +518,6 @@ cameras:
# to be replaced by a newer image. (default: shown below) # to be replaced by a newer image. (default: shown below)
best_image_timeout: 60 best_image_timeout: 60
# Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera.
webui_url: ""
# Optional: zones for this camera # Optional: zones for this camera
zones: zones:
# Required: name of the zone # Required: name of the zone

View File

@@ -10,7 +10,7 @@ Frigate has different live view options, some of which require the bundled `go2r
Live view options can be selected while viewing the live stream. The options are: Live view options can be selected while viewing the live stream. The options are:
| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | | Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations |
| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------ | | ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------- |
| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | | jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none |
| mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only | | mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only |
| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | | webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 |
@@ -104,7 +104,6 @@ If you are having difficulties getting WebRTC to work and you are running Frigat
If not running in host mode, port 8555 will need to be mapped for the container: If not running in host mode, port 8555 will need to be mapped for the container:
docker-compose.yml docker-compose.yml
```yaml ```yaml
services: services:
frigate: frigate:
@@ -116,4 +115,4 @@ services:
::: :::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this. See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#module-webrtc) for more information about this.

View File

@@ -1,103 +0,0 @@
---
id: motion_detection
title: Motion Detection
---
# Tuning Motion Detection
Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection.
Once motion is detected, it tries to group up nearby areas of motion together in hopes of identifying a rectangle in the image that will capture the area worth inspecting. These are the red "motion boxes" you see in the debug viewer.
## The Goal
The default motion settings should work well for the majority of cameras, however there are cases where tuning motion detection can lead to better and more optimal results. Each camera has its own environment with different variables that affect motion, this means that the same motion settings will not fit all of your cameras.
Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events.
## Create Motion Masks
First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs](/configuration/masks.md).
## Prepare For Testing
The easiest way to tune motion detection is to do it live, have one window / screen open with the frigate debug view and motion boxes enabled with another window / screen open allowing for configuring the motion settings. It is recommended to use Home Assistant or MQTT as they offer live configuration of some motion settings meaning that Frigate does not need to be restarted when values are changed.
In Home Assistant the `Improve Contrast`, `Contour Area`, and `Threshold` configuration entities are disabled by default but can easily be enabled and used to tune live, otherwise MQTT can be used.
## Tuning Motion Detection During The Day
Now that things are set up, find a time to tune that represents normal circumstances. For example, if you tune your motion on a day that is sunny and windy you may find later that the motion settings are not sensitive enough on a cloudy and still day.
:::note
Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected.
:::
### Threshold
The threshold value dictates how much of a change in a pixels luminance is required to be considered motion.
```yaml
# default threshold value
motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 30
```
Lower values mean motion detection is more sensitive to changes in color, making it more likely for example to detect motion when a brown dog blends in with a brown fence or a person wearing a red shirt blends in with a red car. If the threshold is too low however, it may cause things like grass blowing in the wind, shadows, etc. to be detected as motion.
Watching the motion boxes in the debug view, increase the threshold until you only see motion that is visible to the eye. Once this is done, it is important to test and ensure that desired motion is still detected.
### Contour Area
```yaml
# default contour_area value
motion:
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.
# As a rule of thumb:
# - 10 - high sensitivity
# - 30 - medium sensitivity
# - 50 - low sensitivity
contour_area: 10
```
Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera.
Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving.
### Improve Contrast
At this point if motion is working as desired there is no reason to continue with tuning for the day. If you were unable to find a balance between desired and undesired motion being detected, you can try disabling improve contrast and going back to the threshold and contour area steps.
## Tuning Motion Detection During The Night
Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone.
However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.
## Tuning For Large Changes In Motion
```yaml
# default lightning_threshold:
motion:
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
```
:::tip
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these events are not missed.
:::
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in no motion detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

View File

@@ -5,7 +5,7 @@ title: Object Detectors
# Officially Supported Detectors # Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## CPU Detector (not recommended) ## CPU Detector (not recommended)
@@ -37,12 +37,6 @@ The EdgeTPU device can be specified using the `"device"` attribute according to
A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
:::tip
See [common Edge-TPU troubleshooting steps](/troubleshooting/edgetpu) if the EdgeTPu is not detected.
:::
### Single USB Coral ### Single USB Coral
```yaml ```yaml
@@ -297,101 +291,3 @@ To verify that the integration is working correctly, start Frigate and observe t
# Community Supported Detectors # Community Supported Detectors
## Rockchip RKNN-Toolkit-Lite2
This detector is only available if one of the following Rockchip SoCs is used:
- RK3588/RK3588S
- RK3568
- RK3566
- RK3562
These SoCs come with a NPU that will highly speed up detection.
### Setup
Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
### Configuration
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for one). Lines that are required at least to use the detector are labeled as required, all other lines are optional.
```yaml
detectors: # required
rknn: # required
type: rknn # required
# core mask for npu
core_mask: 0
model: # required
# name of yolov8 model or path to your own .rknn model file
# possible values are:
# - default-yolov8n
# - default-yolov8s
# - default-yolov8m
# - default-yolov8l
# - default-yolov8x
# - /config/model_cache/rknn/your_custom_model.rknn
path: default-yolov8n
# width and height of detection frames
width: 320
height: 320
# pixel format of detection frame
# default value is rgb but yolov models usually use bgr format
input_pixel_format: bgr # required
# shape of detection frame
input_tensor: nhwc
```
Explanation for rknn specific options:
- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples:
- `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value.
- `core_mask: 0b001` use only core0.
- `core_mask: 0b011` use core0 and core1.
- `core_mask: 0b110` use core1 and core2. **This does not** work, since core0 is disabled.
### Choosing a model
There are 5 default yolov8 models that differ in size and therefore load the NPU more or less. In ascending order, with the top one being the smallest and least computationally intensive model:
| Model | Size in mb |
| ------- | ---------- |
| yolov8n | 9 |
| yolov8s | 25 |
| yolov8m | 54 |
| yolov8l | 90 |
| yolov8x | 136 |
:::tip
You can get the load of your NPU with the following command:
```bash
$ cat /sys/kernel/debug/rknpu/load
>> NPU load: Core0: 0%, Core1: 0%, Core2: 0%,
```
:::
- By default the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary.
- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as `model: path:` option.
- If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`.
- If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in the `config/model_cache/rknn` on your system.
- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on an x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this:
```yaml
model:
path: /config/model_cache/rknn/my-rknn-model.rknn
```
:::tip
When you have a multicore NPU, you can enable all cores to reduce inference times. You should consider activating all cores if you use a larger model like yolov8l. If your NPU has 3 cores (like rk3588/S SoCs), you can enable all 3 cores using:
```yaml
detectors:
rknn:
type: rknn
core_mask: 0b111
```
:::

View File

@@ -13,30 +13,7 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br
As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted. As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
## Configuring Recording Retention ## What if I don't want 24/7 recordings?
Frigate supports both 24/7 and event based recordings with separate retention modes and retention periods.
:::tip
Retention configs support decimals meaning they can be configured to retain `0.5` days, for example.
:::
### 24/7 Recording
The number of days to retain 24/7 recordings can be set via the following config where X is a number, by default 24/7 recording is disabled.
```yaml
record:
enabled: True
retain:
days: 1 # <- number of days to keep 24/7 recordings
```
24/7 recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)
### Event Recording
If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled. If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled.
@@ -45,11 +22,17 @@ record:
enabled: True enabled: True
events: events:
retain: retain:
default: 10 # <- number of days to keep event recordings default: 10
``` ```
This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs. This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
When `retain -> days` is set to `0`, segments will be deleted from the cache if no events are in progress.
## Can I have "24/7" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. **WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.
## What do the different retain modes mean? ## What do the different retain modes mean?
@@ -98,21 +81,17 @@ record:
car: 7 car: 7
``` ```
## Can I have "24/7" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
## How do I export recordings? ## How do I export recordings?
The export page in the Frigate WebUI allows for exporting real time clips with a designated start and stop time as well as exporting a time-lapse for a designated start and stop time. These exports can take a while so it is important to leave the file until it is no longer in progress. The export page in the Frigate WebUI allows for exporting real time clips with a designated start and stop time as well as exporting a timelapse for a designated start and stop time. These exports can take a while so it is important to leave the file until it is no longer in progress.
## Syncing Recordings With Disk ## Syncing Recordings With Disk
In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist. In some cases the recordings files may be deleted but Frigate will not know this has happened. Sync on startup can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist.
```yaml ```yaml
record: record:
sync_recordings: True sync_on_startup: True
``` ```
:::warning :::warning

View File

@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#configuration) for more advanced configurations and features. Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.1) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#configuration) for more advanced configurations and features.
:::note :::note
@@ -18,7 +18,6 @@ You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which ca
### Birdseye Restream ### Birdseye Restream
Birdseye RTSP restream can be accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the birdseye restream will cause birdseye to run 24/7 which may increase CPU usage somewhat. Birdseye RTSP restream can be accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the birdseye restream will cause birdseye to run 24/7 which may increase CPU usage somewhat.
```yaml ```yaml
birdseye: birdseye:
restream: true restream: true
@@ -33,7 +32,8 @@ go2rtc:
rtsp: rtsp:
username: "admin" username: "admin"
password: "pass" password: "pass"
streams: ... streams:
...
``` ```
**NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras. **NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras.
@@ -138,7 +138,7 @@ cameras:
## Advanced Restream Configurations ## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}` NOTE: The output will need to be passed with two curly braces `{{output}}`

View File

@@ -95,7 +95,7 @@ The following commands are used inside the container to ensure hardware accelera
**Raspberry Pi (64bit)** **Raspberry Pi (64bit)**
This should show less than 50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`. This should show <50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`.
```shell ```shell
ffmpeg -c:v h264_v4l2m2m -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null ffmpeg -c:v h264_v4l2m2m -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null
@@ -131,7 +131,7 @@ ffmpeg -c:v h264_qsv -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/
- [Frigate source code](#frigate-core-web-and-docs) - [Frigate source code](#frigate-core-web-and-docs)
- All [core](#core) prerequisites _or_ another running Frigate instance locally available - All [core](#core) prerequisites _or_ another running Frigate instance locally available
- Node.js 20 - Node.js 16
### Making changes ### Making changes
@@ -183,7 +183,7 @@ npm run test
### Prerequisites ### Prerequisites
- [Frigate source code](#frigate-core-web-and-docs) - [Frigate source code](#frigate-core-web-and-docs)
- Node.js 20 - Node.js 16
### Making changes ### Making changes
@@ -201,7 +201,7 @@ npm run start
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
The docs are built using [Docusaurus v3](https://docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation. The docs are built using [Docusaurus v2](https://v2.docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation.
#### 3. Build (optional) #### 3. Build (optional)

View File

@@ -9,7 +9,7 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility
I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options. I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options.
Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-410520-possibly-others). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data.
Here are some of the cameras I recommend: Here are some of the cameras I recommend:
@@ -95,16 +95,6 @@ Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powe
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
#### Rockchip SoC
Frigate supports SBCs with the following Rockchip SoCs:
- RK3566/RK3568
- RK3588/RK3588S
- RV1103/RV1106
- RK3562
Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC inference speeds vary between 20 - 25 ms.
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version) ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity. This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

View File

@@ -47,12 +47,6 @@ services:
... ...
``` ```
:::caution
Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder.
:::
### Calculating required shm-size ### Calculating required shm-size
Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**. Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.
@@ -78,6 +72,7 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576)
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration. The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
### Raspberry Pi 3/4 ### Raspberry Pi 3/4
By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options). By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).
@@ -86,7 +81,22 @@ Additionally, the USB Coral draws a considerable amount of power. If using any o
## Docker ## Docker
Running in Docker with compose is the recommended install method. Running in Docker with compose is the recommended install method:
:::note
The following officially supported builds are available:
`ghcr.io/blakeblackshear/frigate:stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64
`ghcr.io/blakeblackshear/frigate:stable-standard-arm64` - Standard Frigate build for arm64
`ghcr.io/blakeblackshear/frigate:stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
The following community supported builds are available:
`ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
`ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
:::
```yaml ```yaml
version: "3.9" version: "3.9"
@@ -139,18 +149,6 @@ docker run -d \
ghcr.io/blakeblackshear/frigate:stable ghcr.io/blakeblackshear/frigate:stable
``` ```
The official docker image tags for the current stable version are:
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64
- `stable-standard-arm64` - Standard Frigate build for arm64
- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
## Home Assistant Addon ## Home Assistant Addon
:::caution :::caution
@@ -158,7 +156,6 @@ The community supported docker image tags for the current stable version are:
As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported. As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported.
There are important limitations in Home Assistant Operating System to be aware of: There are important limitations in Home Assistant Operating System to be aware of:
- Separate local storage for media is not yet supported by Home Assistant - Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver. - AMD GPUs are not supported because HA OS does not include the mesa driver.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime. - Nvidia GPUs are not supported because addons do not support the nvidia runtime.
@@ -213,6 +210,7 @@ If you're running Frigate on a rack mounted server and want to passthrough the Go
These settings were tested on DSM 7.1.1-42962 Update 4 These settings were tested on DSM 7.1.1-42962 Update 4
**General:** **General:**
The `Execute container using high privilege` option needs to be enabled in order to give the frigate container the elevated privileges it may need. The `Execute container using high privilege` option needs to be enabled in order to give the frigate container the elevated privileges it may need.
@@ -221,12 +219,14 @@ The `Enable auto-restart` option can be enabled if you want the container to aut
![image](https://user-images.githubusercontent.com/4516296/232586790-0b659a82-561d-4bc5-899b-0f5b39c6b11d.png) ![image](https://user-images.githubusercontent.com/4516296/232586790-0b659a82-561d-4bc5-899b-0f5b39c6b11d.png)
**Advanced Settings:** **Advanced Settings:**
If you want to use the password template feature, you should add the "FRIGATE_RTSP_PASSWORD" environment variable and set it to your preferred password under advanced settings. The rest of the environment variables should be left as default for now. If you want to use the password template feature, you should add the "FRIGATE_RTSP_PASSWORD" environment variable and set it to your preferred password under advanced settings. The rest of the environment variables should be left as default for now.
![image](https://user-images.githubusercontent.com/4516296/232587163-0eb662d4-5e28-4914-852f-9db1ec4b9c3d.png) ![image](https://user-images.githubusercontent.com/4516296/232587163-0eb662d4-5e28-4914-852f-9db1ec4b9c3d.png)
**Port Settings:** **Port Settings:**
The network mode should be set to `bridge`. You need to map the default frigate container ports to your local Synology NAS ports that you want to use to access Frigate. The network mode should be set to `bridge`. You need to map the default frigate container ports to your local Synology NAS ports that you want to use to access Frigate.
@@ -235,6 +235,7 @@ There may be other services running on your NAS that are using the same ports th
![image](https://user-images.githubusercontent.com/4516296/232582642-773c0e37-7ef5-4373-8ce3-41401b1626e6.png) ![image](https://user-images.githubusercontent.com/4516296/232582642-773c0e37-7ef5-4373-8ce3-41401b1626e6.png)
**Volume Settings:** **Volume Settings:**
You need to configure 2 paths: You need to configure 2 paths:
@@ -256,7 +257,6 @@ QNAP has a graphic tool named Container Station to install and manage docker con
Because of the above limitations, the installation has to be done from command line. Here are the steps: Because of the above limitations, the installation has to be done from command line. Here are the steps:
**Preparation** **Preparation**
1. Install Container Station from QNAP App Center if it is not installed. 1. Install Container Station from QNAP App Center if it is not installed.
2. Enable ssh on your QNAP (please do an Internet search on how to do this). 2. Enable ssh on your QNAP (please do an Internet search on how to do this).
3. Prepare Frigate config file, name it `config.yml`. 3. Prepare Frigate config file, name it `config.yml`.
@@ -267,8 +267,7 @@ Because of above limitations, the installation has to be done from command line.
**Installation** **Installation**
Run the following commands to install Frigate (using `stable` version as example): Run the following commands to install Frigate (using `stable` version as example):
```bash
```shell
# Download Frigate image # Download Frigate image
docker pull ghcr.io/blakeblackshear/frigate:stable docker pull ghcr.io/blakeblackshear/frigate:stable
# Create directory to host Frigate config file on QNAP file system. # Create directory to host Frigate config file on QNAP file system.
@@ -310,3 +309,5 @@ docker run \
``` ```
Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page. Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page.

View File

@@ -1,67 +0,0 @@
---
id: video_pipeline
title: Video pipeline
---
Frigate uses a sophisticated video pipeline that starts with the camera feed and progressively applies transformations to it (e.g. decoding, motion detection, etc.).
This guide provides an overview to help users understand some of the key Frigate concepts.
## Overview
At a high level, there are five processing steps that could be applied to a camera feed
```mermaid
%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
flowchart LR
Feed(Feed\nacquisition) --> Decode(Video\ndecoding)
Decode --> Motion(Motion\ndetection)
Motion --> Object(Object\ndetection)
Feed --> Recording(Recording\nand\nvisualization)
Motion --> Recording
Object --> Recording
```
As the diagram shows, all feeds first need to be acquired. Depending on the data source, it may be as simple as using FFmpeg to connect to an RTSP source via TCP or something more involved like connecting to an Apple Homekit camera using go2rtc. A single camera can produce a main (i.e. high resolution) and a sub (i.e. lower resolution) video feed.
Typically, the sub-feed will be decoded to produce full-frame images. As part of this process, the resolution may be downscaled and an image sampling frequency may be imposed (e.g. keep 5 frames per second).
These frames will then be compared over time to detect movement areas (a.k.a. motion boxes). These motion boxes are combined into motion regions and are analyzed by a machine learning model to detect known objects. Finally, the snapshot and recording retention config will decide what video clips and events should be saved.
## Detailed view of the video pipeline
The following diagram adds a lot more detail than the simple view explained before. The goal is to show the detailed data paths between the processing steps.
```mermaid
%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
flowchart TD
RecStore[(Recording\nstore)]
SnapStore[(Snapshot\nstore)]
subgraph Acquisition
Cam["Camera"] -->|FFmpeg supported| Stream
Cam -->|"Other streaming\nprotocols"| go2rtc
go2rtc("go2rtc") --> Stream
Stream[Capture main and\nsub streams] --> |detect stream|Decode(Decode and\ndownscale)
end
subgraph Motion
Decode --> MotionM(Apply\nmotion masks)
MotionM --> MotionD(Motion\ndetection)
end
subgraph Detection
MotionD --> |motion regions| ObjectD(Object detection)
Decode --> ObjectD
ObjectD --> ObjectFilter(Apply object filters & zones)
ObjectFilter --> ObjectZ(Track objects)
end
Decode --> |decoded frames|Birdseye
MotionD --> |motion event|Birdseye
ObjectZ --> |object event|Birdseye
MotionD --> |"video segments\n(retain motion)"|RecStore
ObjectZ --> |detection clip|RecStore
Stream -->|"video segments\n(retain all)"| RecStore
ObjectZ --> |detection snapshot|SnapStore
```

View File

@@ -3,8 +3,6 @@ id: configuring_go2rtc
title: Configuring go2rtc title: Configuring go2rtc
--- ---
# Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features: Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream - WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
@@ -13,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
# Setup a go2rtc stream # Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#module-streams), not just rtsp. First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#module-streams), not just rtsp.
```yaml ```yaml
go2rtc: go2rtc:
@@ -26,7 +24,7 @@ The easiest live view to get working is MSE. After adding this to the config, re
### What if my video doesn't play? ### What if my video doesn't play?
If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration: If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.8.1#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. 
Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
```yaml ```yaml
go2rtc: go2rtc:

View File

@@ -3,145 +3,7 @@ id: getting_started
title: Getting started title: Getting started
--- ---
# Getting Started This guide walks through the steps to build a configuration file for Frigate. It assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.
## Setting up hardware
This section guides you through setting up a server with Debian Bookworm and Docker. If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below.
### Install Debian 12 (Bookworm)
There are many guides on how to install Debian Server, so this will be an abbreviated guide. Connect a temporary monitor and keyboard to your device so you can install a minimal server without a desktop environment.
#### Prepare installation media
1. Download the small installation image from the [Debian website](https://www.debian.org/distrib/netinst)
1. Flash the ISO to a USB device (popular tool is [balena Etcher](https://etcher.balena.io/))
1. Boot your device from USB
#### Install and setup Debian for remote access
1. Ensure your device is connected to the network so updates and software options can be installed
1. Choose the non-graphical install option if you don't have a mouse connected, but either install method works fine
1. You will be prompted to set the root user password and create a user with a password
1. Install the minimum software. Fewer dependencies result in less maintenance.
1. Uncheck "Debian desktop environment" and "GNOME"
1. Check "SSH server"
1. Keep "standard system utilities" checked
1. After reboot, login as root at the command prompt to add user to sudoers
1. Install sudo
```bash
apt update && apt install -y sudo
```
1. Add the user you created to the sudo group (change `blake` to your own user)
```bash
usermod -aG sudo blake
```
1. Shutdown by running `poweroff`
At this point, you can install the device in a permanent location. The remaining steps can be performed via SSH from another device. If you don't have an SSH client, you can install one of the options listed in the [Visual Studio Code documentation](https://code.visualstudio.com/docs/remote/troubleshooting#_installing-a-supported-ssh-client).
#### Finish setup via SSH
1. Connect via SSH and login with your non-root user created during install
1. Setup passwordless sudo so you don't have to type your password for each sudo command (change `blake` in the command below to your user)
```bash
echo 'blake ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/user
```
1. Logout and login again to activate passwordless sudo
1. Setup automatic security updates for the OS (optional)
1. Ensure everything is up to date by running
```bash
sudo apt update && sudo apt upgrade -y
```
1. Install unattended upgrades
```bash
sudo apt install -y unattended-upgrades
echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | sudo debconf-set-selections
sudo dpkg-reconfigure -f noninteractive unattended-upgrades
```
Now you have a minimal Debian server that requires very little maintenance.
### Install Docker
1. Install Docker Engine (not Docker Desktop) using the [official docs](https://docs.docker.com/engine/install/debian/)
1. Specifically, follow the steps in the [Install using the apt repository](https://docs.docker.com/engine/install/debian/#install-using-the-repository) section
2. Add your user to the docker group as described in the [Linux postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/)
## Installing Frigate
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate).
### Setup directories
Frigate requires a valid config file to start. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation.
```
.
├── docker-compose.yml
├── config/
│ └── config.yml
└── storage/
```
This will create the above structure:
```bash
mkdir storage config && touch docker-compose.yml config/config.yml
```
If you are setting up Frigate on a Linux device via SSH, you can use [nano](https://itsfoss.com/nano-editor-guide/) to edit the following files. If you prefer to edit remote files with a full editor instead of a terminal, I recommend using [Visual Studio Code](https://code.visualstudio.com/) with the [Remote SSH extension](https://code.visualstudio.com/docs/remote/ssh-tutorial).
:::note
This `docker-compose.yml` file is just a starter for amd64 devices. You will need to customize it for your setup as detailed in the [Installation docs](/frigate/installation#docker).
:::
`docker-compose.yml`
```yaml
version: "3.9"
services:
frigate:
container_name: frigate
restart: unless-stopped
image: ghcr.io/blakeblackshear/frigate:stable
volumes:
- ./config:/config
- ./storage:/media/frigate
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
target: /tmp/cache
tmpfs:
size: 1000000000
ports:
- "5000:5000"
- "8554:8554" # RTSP feeds
```
`config.yml`
```yaml
mqtt:
enabled: False
cameras:
dummy_camera: # <--- this will be changed to your actual camera later
enabled: False
ffmpeg:
inputs:
- path: rtsp://127.0.0.1:554/rtsp
roles:
- detect
```
Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. Frigate should now be accessible at `server_ip:5000` and you can finish the configuration using the built-in configuration editor.
## Configuring Frigate
This section assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.
### Step 1: Add a detect stream ### Step 1: Add a detect stream
@@ -153,7 +15,6 @@ mqtt:
cameras: cameras:
name_of_your_camera: # <------ Name the camera name_of_your_camera: # <------ Name the camera
enabled: True
ffmpeg: ffmpeg:
inputs: inputs:
- path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
@@ -175,21 +36,7 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration
Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware. Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware.
Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md): Here is an example configuration with hardware acceleration configured for Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
```yaml
version: "3.9"
services:
frigate:
...
devices:
- /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
...
```
`config.yml`
```yaml ```yaml
mqtt: ... mqtt: ...
@@ -206,19 +53,6 @@ cameras:
By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config. By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config.
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
```yaml
version: "3.9"
services:
frigate:
...
devices:
- /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
...
```
```yaml ```yaml
mqtt: ... mqtt: ...

View File

@@ -125,7 +125,7 @@ This section points to your SSL files, the example below shows locations to a de
### Setup reverse proxy settings ### Setup reverse proxy settings
The settings below enabled connection upgrade, sets up logging (optional) and proxies everything from the `/` context to the docker host and port specified earlier in the configuration Thhe settings below enabled connection upgrade, sets up logging (optional) and proxies everything from the `/` context to the docker host and port specified earlier in the configuration
``` ```
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;

View File

@@ -263,15 +263,6 @@ Returns the snapshot image from the latest event for the given camera and label
Returns the snapshot image from the specific point in that cameras recordings. Returns the snapshot image from the specific point in that cameras recordings.
### `GET /api/<camera_name>/grid.jpg`
Returns the latest camera image with the regions grid overlaid.
| param | Type | Description |
| ------------ | ----- | ------------------------------------------------------------------------------------------ |
| `color` | str | The color of the grid (red,green,blue,black,white). Defaults to "green". |
| `font_scale` | float | Font scale. Can be used to increase font size on high resolution cameras. Defaults to 0.5. |
### `GET /clips/<camera>-<id>.jpg` ### `GET /clips/<camera>-<id>.jpg`
JPG snapshot for the given camera and event id. JPG snapshot for the given camera and event id.
@@ -302,14 +293,6 @@ It is also possible to export this recording as a timelapse.
} }
``` ```
### `DELETE /api/export/<export_name>`
Delete an export from disk.
### `PATCH /api/export/<export_name_current>/<export_name_new>`
Renames an export.
### `GET /api/<camera_name>/recordings/summary` ### `GET /api/<camera_name>/recordings/summary`
Hourly summary of recordings data for a camera. Hourly summary of recordings data for a camera.
@@ -378,7 +361,3 @@ Recording retention config still applies to manual events, if frigate is configu
### `PUT /api/events/<event_id>/end` ### `PUT /api/events/<event_id>/end`
End a specific manual event without a predetermined length. End a specific manual event without a predetermined length.
### `POST /api/restart`
Restarts Frigate process.

View File

@@ -177,7 +177,7 @@ The Frigate integration seamlessly supports the use of multiple Frigate servers.
In order for multiple Frigate instances to function correctly, the In order for multiple Frigate instances to function correctly, the
`topic_prefix` and `client_id` parameters must be set differently per server. `topic_prefix` and `client_id` parameters must be set differently per server.
See [MQTT See [MQTT
configuration](mqtt) configuration](mqtt.md)
for how to set these. for how to set these.
#### API URLs #### API URLs

View File

@@ -221,10 +221,6 @@ Topic to turn the PTZ autotracker for a camera on and off. Expected values are `
Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`. Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/ptz_autotracker/active`
Topic to determine if PTZ autotracker is actively tracking an object. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/birdseye/set` ### `frigate/<camera_name>/birdseye/set`
Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode

View File

@@ -19,7 +19,7 @@ Once logged in, you can generate an API key for Frigate in Settings.
### Set your API key ### Set your API key
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `SEND TO FRIGATE+` buttons on the events page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch). In Frigate, you can set the `PLUS_API_KEY` environment variable to enable the `SEND TO FRIGATE+` buttons on the events page. You can set it in your Docker Compose file or in your Docker run command. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
:::caution :::caution

View File

@@ -9,34 +9,6 @@ With a subscription, and at each annual renewal, you will receive 12 model train
Information on how to integrate Frigate+ with Frigate can be found in the [integrations docs](/integrations/plus). Information on how to integrate Frigate+ with Frigate can be found in the [integrations docs](/integrations/plus).
## Improving your model
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. Because a limited number of users submitted images to Frigate+ prior to this launch, you may need to submit several hundred images per camera to see good results. With all the new images now being submitted, future base models will improve as more and more users (including you) submit examples to Frigate+.
False positives can be reduced by submitting **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
You may find that it's helpful to lower your thresholds a little in order to generate more false/true positives near the threshold value. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
In order to request your first model, you will need to have annotated and verified at least 10 images. Each subsequent model request will require that 10 additional images are verified. However, this is the bare minimum. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night.
As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
## Properly labeling images
For the best results, follow the following guidelines.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused.
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime.
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
![Fedex Logo](/img/plus/fedex-logo.jpg)
## Frequently asked questions ## Frequently asked questions
### Are my models trained just on my image uploads? How are they built? ### Are my models trained just on my image uploads? How are they built?
@@ -45,7 +17,7 @@ Frigate+ models are built by fine tuning a base model with the images you have a
### What is a training credit and how do I use them? ### What is a training credit and how do I use them?
Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account. When new base models are available, it will require the use of a training credit to generate a new user model on the new base model. Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account.
### Are my video feeds sent to the cloud for analysis when using Frigate+ models? ### Are my video feeds sent to the cloud for analysis when using Frigate+ models?
@@ -137,3 +109,31 @@ When using Frigate+ models, Frigate will choose the snapshot of a person object
`amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects. `amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
![Fedex Attribute](/img/plus/attribute-example-fedex.jpg) ![Fedex Attribute](/img/plus/attribute-example-fedex.jpg)
## Properly labeling images
For the best results, follow the following guidelines.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused.
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime.
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
![Fedex Logo](/img/plus/fedex-logo.jpg)
## Improving your model
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. This may be because your cameras don't look quite enough like the user submissions that were used to train the base model. Over time, this will improve as more and more users (including you) submit examples to Frigate+.
False positives can be reduced by submitting **both** true positives and false positives. This will help the model differentiate between what is and isn't correct.
You may find that it's helpful to lower your thresholds a little in order to generate more false/true positives near the threshold value. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
In order to request your first model, you will need to have annotated and verified at least 10 images. Each subsequent model request will require that 10 additional images are verified. However, this is the bare minimum. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night.
As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.

View File

@@ -1,48 +0,0 @@
---
id: edgetpu
title: Troubleshooting EdgeTPU
---
## USB Coral Not Detected
There are many possible causes for a USB coral not being detected and some are OS specific. It is important to understand how the USB coral works:
1. When the device is first plugged in and has not initialized it will appear as `1a6e:089a Global Unichip Corp.` when running `lsusb` or checking the hardware page in HA OS.
2. Once initialized, the device will appear as `18d1:9302 Google Inc.` when running `lsusb` or checking the hardware page in HA OS.
If the coral does not initialize then Frigate can not interface with it. Some common reasons for the USB based Coral not initializing are:
### Not Enough Power
The USB coral can draw up to 900mA and this can be too much for some on-device USB ports, especially for small board computers like the RPi. If the coral is not initializing then some recommended steps are:
1. Try a different port, some ports are capable of providing more power than others.
2. Make sure the port is USB3, this is important for power and to ensure the coral runs at max speed.
3. Try a different cable, some users have found the included cable to not work well.
4. Use an externally powered USB hub.
### Incorrect Device Access
The USB coral has different IDs when it is uninitialized and initialized.
- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped.
- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed.
## USB Coral Detection Appears to be Stuck
The USB Coral can become stuck and need to be restarted, this can happen for a number of reasons depending on hardware and software setup. Some common reasons are:
1. Some users have found the cable included with the coral to cause this problem and that switching to a different cable fixed it entirely.
2. Running Frigate in a VM may cause communication with the device to be lost and need to be reset.
## PCIe Coral Not Detected
The most common reason for the PCIe coral not being detected is that the driver has not been installed. See [the coral docs(https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) for how to install the driver for the PCIe based coral.
## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU
Coral Dual EdgeTPU is one card with two identical TPU cores. Each core has it's own PCIe interface and motherboard needs to have two PCIe busses on the m.2 slot to make them both work.
E-key slot implemented to full m.2 electomechanical specification has two PCIe busses. Most motherboard manufacturers implement only one PCIe bus in m.2 E-key connector (this is why only one TPU is working). Some SBCs can have only USB bus on m.2 connector, ie none of TPUs will work.
In this case it is recommended to use a Dual EdgeTPU Adapter [like the one from MagicBlueSmoke](https://github.com/magic-blue-smoke/Dual-Edge-TPU-Adapter)

View File

@@ -3,7 +3,7 @@ id: recordings
title: Troubleshooting Recordings title: Troubleshooting Recordings
--- ---
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest... ## `WARNING : Unable to keep up with recording segments in cache for {camera}. Keeping the 5 most recent segments out of 6 and discarding the rest...`
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording, this will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk. This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording, this will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
@@ -21,18 +21,18 @@ DEBUG : Copied /media/frigate/recordings/{segment_path} in 0.2 seconds.
It is important to let this run until the errors begin to happen, to confirm that there is not a slow down in the disk at the time of the error. It is important to let this run until the errors begin to happen, to confirm that there is not a slow down in the disk at the time of the error.
#### Copy Times > 1 second ### Copy Times > 1 second
If the storage is too slow to keep up with the recordings then the maintainer will fall behind and purge the oldest recordings to ensure the cache does not fill up causing a crash. In this case it is important to diagnose why the copy times are slow. If the storage is too slow to keep up with the recordings then the maintainer will fall behind and purge the oldest recordings to ensure the cache does not fill up causing a crash. In this case it is important to diagnose why the copy times are slow.
##### Check Storage Type #### Check Storage Type
Mounting a network share is a popular option for storing Recordings, but this can lead to reduced copy times and cause problems. Some users have found that using `NFS` instead of `SMB` considerably decreased the copy times and fixed the issue. It is also important to ensure that the network connection between the device running Frigate and the network share is stable and fast. Mounting a network share is a popular option for storing Recordings, but this can lead to reduced copy times and cause problems. Some users have found that using `NFS` instead of `SMB` considerably decreased the copy times and fixed the issue. It is also important to ensure that the network connection between the device running Frigate and the network share is stable and fast.
##### Check mount options #### Check mount options
Some users found that mounting a drive via `fstab` with the `sync` option caused dramatically reduce performance and led to this issue. Using `async` instead greatly reduced copy times. Some users found that mounting a drive via `fstab` with the `sync` option caused dramatically reduce performance and led to this issue. Using `async` instead greatly reduced copy times.
#### Copy Times < 1 second ### Copy Times < 1 second
If the storage is working quickly then this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves. If the storage is working quickly then this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves.

View File

@@ -1,77 +1,70 @@
const path = require("path"); const path = require('path');
module.exports = { module.exports = {
title: "Frigate", title: 'Frigate',
tagline: "NVR With Realtime Object Detection for IP Cameras", tagline: 'NVR With Realtime Object Detection for IP Cameras',
url: "https://docs.frigate.video", url: 'https://docs.frigate.video',
baseUrl: "/", baseUrl: '/',
onBrokenLinks: "throw", onBrokenLinks: 'throw',
onBrokenMarkdownLinks: "warn", onBrokenMarkdownLinks: 'warn',
favicon: "img/favicon.ico", favicon: 'img/favicon.ico',
organizationName: "blakeblackshear", organizationName: 'blakeblackshear',
projectName: "frigate", projectName: 'frigate',
themes: ["@docusaurus/theme-mermaid"],
markdown: {
mermaid: true,
},
themeConfig: { themeConfig: {
algolia: { algolia: {
appId: "WIURGBNBPY", appId: 'WIURGBNBPY',
apiKey: "d02cc0a6a61178b25da550212925226b", apiKey: 'd02cc0a6a61178b25da550212925226b',
indexName: "frigate", indexName: 'frigate',
}, },
docs: { docs: {
sidebar: { sidebar: {
hideable: true, hideable: true,
}, }
},
prism: {
additionalLanguages: ["bash", "json"],
}, },
navbar: { navbar: {
title: "Frigate", title: 'Frigate',
logo: { logo: {
alt: "Frigate", alt: 'Frigate',
src: "img/logo.svg", src: 'img/logo.svg',
srcDark: "img/logo-dark.svg", srcDark: 'img/logo-dark.svg',
}, },
items: [ items: [
{ {
to: "/", to: '/',
activeBasePath: "docs", activeBasePath: 'docs',
label: "Docs", label: 'Docs',
position: "left", position: 'left',
}, },
{ {
href: "https://frigate.video", href: 'https://frigate.video',
label: "Website", label: 'Website',
position: "right", position: 'right',
}, },
{ {
href: "http://demo.frigate.video", href: 'http://demo.frigate.video',
label: "Demo", label: 'Demo',
position: "right", position: 'right',
}, },
{ {
href: "https://github.com/blakeblackshear/frigate", href: 'https://github.com/blakeblackshear/frigate',
label: "GitHub", label: 'GitHub',
position: "right", position: 'right',
}, },
], ],
}, },
footer: { footer: {
style: "dark", style: 'dark',
links: [ links: [
{ {
title: "Community", title: 'Community',
items: [ items: [
{ {
label: "GitHub", label: 'GitHub',
href: "https://github.com/blakeblackshear/frigate", href: 'https://github.com/blakeblackshear/frigate',
}, },
{ {
label: "Discussions", label: 'Discussions',
href: "https://github.com/blakeblackshear/frigate/discussions", href: 'https://github.com/blakeblackshear/frigate/discussions',
}, },
], ],
}, },
@@ -79,22 +72,21 @@ module.exports = {
copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`, copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`,
}, },
}, },
plugins: [path.resolve(__dirname, "plugins", "raw-loader")], plugins: [path.resolve(__dirname, 'plugins', 'raw-loader')],
presets: [ presets: [
[ [
"@docusaurus/preset-classic", '@docusaurus/preset-classic',
{ {
docs: { docs: {
routeBasePath: "/", routeBasePath: '/',
sidebarPath: require.resolve("./sidebars.js"), sidebarPath: require.resolve('./sidebars.js'),
// Please change this to your repo. // Please change this to your repo.
editUrl: editUrl: 'https://github.com/blakeblackshear/frigate/edit/master/docs/',
"https://github.com/blakeblackshear/frigate/edit/master/docs/", sidebarCollapsible: false
sidebarCollapsible: false,
}, },
theme: { theme: {
customCss: require.resolve("./src/css/custom.css"), customCss: require.resolve('./src/css/custom.css'),
}, },
}, },
], ],

19914
docs/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -14,15 +14,14 @@
"write-heading-ids": "docusaurus write-heading-ids" "write-heading-ids": "docusaurus write-heading-ids"
}, },
"dependencies": { "dependencies": {
"@docusaurus/core": "^3.0.0", "@docusaurus/core": "^2.4.1",
"@docusaurus/preset-classic": "^3.0.0", "@docusaurus/preset-classic": "^2.4.1",
"@docusaurus/theme-mermaid": "^3.0.0", "@mdx-js/react": "^1.6.22",
"@mdx-js/react": "^3.0.0", "clsx": "^1.2.1",
"clsx": "^2.0.0", "prism-react-renderer": "^1.3.5",
"prism-react-renderer": "^2.1.0",
"raw-loader": "^4.0.2", "raw-loader": "^4.0.2",
"react": "^18.2.0", "react": "^17.0.2",
"react-dom": "^18.2.0" "react-dom": "^17.0.2"
}, },
"browserslist": { "browserslist": {
"production": [ "production": [
@@ -37,11 +36,10 @@
] ]
}, },
"devDependencies": { "devDependencies": {
"@docusaurus/module-type-aliases": "^3.0.0", "@docusaurus/module-type-aliases": "^2.4.0",
"@docusaurus/types": "^3.0.0", "@types/react": "^17.0.0"
"@types/react": "^18.2.29"
}, },
"engines": { "engines": {
"node": ">=18.0" "node": ">=16.14"
} }
} }

View File

@@ -5,7 +5,6 @@ module.exports = {
"frigate/hardware", "frigate/hardware",
"frigate/installation", "frigate/installation",
"frigate/camera_setup", "frigate/camera_setup",
"frigate/video_pipeline",
], ],
Guides: [ Guides: [
"guides/getting_started", "guides/getting_started",
@@ -22,7 +21,7 @@ module.exports = {
{ {
type: "link", type: "link",
label: "Go2RTC Configuration Reference", label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.8.4#configuration", href: "https://github.com/AlexxIT/go2rtc/tree/v1.8.1#configuration",
}, },
], ],
Detectors: [ Detectors: [
@@ -33,7 +32,6 @@ module.exports = {
"configuration/cameras", "configuration/cameras",
"configuration/record", "configuration/record",
"configuration/snapshots", "configuration/snapshots",
"configuration/motion_detection",
"configuration/birdseye", "configuration/birdseye",
"configuration/live", "configuration/live",
"configuration/restream", "configuration/restream",
@@ -60,11 +58,7 @@ module.exports = {
"integrations/third_party_extensions", "integrations/third_party_extensions",
], ],
"Frigate+": ["plus/index"], "Frigate+": ["plus/index"],
Troubleshooting: [ Troubleshooting: ["troubleshooting/faqs", "troubleshooting/recordings"],
"troubleshooting/faqs",
"troubleshooting/recordings",
"troubleshooting/edgetpu",
],
Development: [ Development: [
"development/contributing", "development/contributing",
"development/contributing-boards", "development/contributing-boards",

View File

@@ -191,8 +191,7 @@ class FrigateApp:
"i", "i",
self.config.cameras[camera_name].onvif.autotracking.enabled, self.config.cameras[camera_name].onvif.autotracking.enabled,
), ),
"ptz_tracking_active": mp.Event(), "ptz_stopped": mp.Event(),
"ptz_motor_stopped": mp.Event(),
"ptz_reset": mp.Event(), "ptz_reset": mp.Event(),
"ptz_start_time": mp.Value("d", 0.0), # type: ignore[typeddict-item] "ptz_start_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
@@ -213,7 +212,7 @@ class FrigateApp:
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards # from mypy 0.981 onwards
} }
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set() self.ptz_metrics[camera_name]["ptz_stopped"].set()
self.feature_metrics[camera_name] = { self.feature_metrics[camera_name] = {
"audio_enabled": mp.Value( # type: ignore[typeddict-item] "audio_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799 # issue https://github.com/python/typeshed/issues/8799
@@ -279,17 +278,6 @@ class FrigateApp:
except PermissionError: except PermissionError:
logger.error("Unable to write to /config to save DB state") logger.error("Unable to write to /config to save DB state")
def cleanup_timeline_db(db: SqliteExtDatabase) -> None:
db.execute_sql(
"DELETE FROM timeline WHERE source_id NOT IN (SELECT id FROM event);"
)
try:
with open(f"{CONFIG_DIR}/.timeline", "w") as f:
f.write(str(datetime.datetime.now().timestamp()))
except PermissionError:
logger.error("Unable to write to /config to save DB state")
# Migrate DB location # Migrate DB location
old_db_path = DEFAULT_DB_PATH old_db_path = DEFAULT_DB_PATH
if not os.path.isfile(self.config.database.path) and os.path.isfile( if not os.path.isfile(self.config.database.path) and os.path.isfile(
@@ -305,11 +293,6 @@ class FrigateApp:
router = Router(migrate_db) router = Router(migrate_db)
router.run() router.run()
# this is a temporary check to clean up user DB from beta
# will be removed before final release
if not os.path.exists(f"{CONFIG_DIR}/.timeline"):
cleanup_timeline_db(migrate_db)
# check if vacuum needs to be run # check if vacuum needs to be run
if os.path.exists(f"{CONFIG_DIR}/.vacuum"): if os.path.exists(f"{CONFIG_DIR}/.vacuum"):
with open(f"{CONFIG_DIR}/.vacuum") as f: with open(f"{CONFIG_DIR}/.vacuum") as f:
@@ -461,7 +444,6 @@ class FrigateApp:
self.config, self.config,
self.onvif_controller, self.onvif_controller,
self.ptz_metrics, self.ptz_metrics,
self.dispatcher,
self.stop_event, self.stop_event,
) )
self.ptz_autotracker_thread.start() self.ptz_autotracker_thread.start()
@@ -503,9 +485,7 @@ class FrigateApp:
# create or update region grids for each camera # create or update region grids for each camera
for camera in self.config.cameras.values(): for camera in self.config.cameras.values():
self.region_grids[camera.name] = get_camera_regions_grid( self.region_grids[camera.name] = get_camera_regions_grid(
camera.name, camera.name, camera.detect
camera.detect,
max(self.config.model.width, self.config.model.height),
) )
def start_camera_processors(self) -> None: def start_camera_processors(self) -> None:

View File

@@ -96,11 +96,7 @@ class Dispatcher:
elif topic == REQUEST_REGION_GRID: elif topic == REQUEST_REGION_GRID:
camera = payload camera = payload
self.camera_metrics[camera]["region_grid_queue"].put( self.camera_metrics[camera]["region_grid_queue"].put(
get_camera_regions_grid( get_camera_regions_grid(camera, self.config.cameras[camera].detect)
camera,
self.config.cameras[camera].detect,
max(self.config.model.width, self.config.model.height),
)
) )
else: else:
self.publish(topic, payload, retain=False) self.publish(topic, payload, retain=False)
@@ -185,13 +181,6 @@ class Dispatcher:
ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking
if payload == "ON": if payload == "ON":
if not self.config.cameras[
camera_name
].onvif.autotracking.enabled_in_config:
logger.error(
"Autotracking must be enabled in the config to be turned on via MQTT."
)
return
if not self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value: if not self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
logger.info(f"Turning on ptz autotracker for {camera_name}") logger.info(f"Turning on ptz autotracker for {camera_name}")
self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = True self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = True

View File

@@ -71,7 +71,7 @@ class MqttClient(Communicator): # type: ignore[misc]
) )
self.publish( self.publish(
f"{camera_name}/ptz_autotracker/state", f"{camera_name}/ptz_autotracker/state",
"ON" if camera.onvif.autotracking.enabled_in_config else "OFF", "ON" if camera.onvif.autotracking.enabled else "OFF",
retain=True, retain=True,
) )
self.publish( self.publish(

View File

@@ -1,6 +1,5 @@
"""Websocket communicator.""" """Websocket communicator."""
import errno
import json import json
import logging import logging
import threading import threading
@@ -13,7 +12,7 @@ from ws4py.server.wsgirefserver import (
WSGIServer, WSGIServer,
) )
from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.websocket import WebSocket as WebSocket_ from ws4py.websocket import WebSocket
from frigate.comms.dispatcher import Communicator from frigate.comms.dispatcher import Communicator
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
@@ -21,18 +20,6 @@ from frigate.config import FrigateConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class WebSocket(WebSocket_):
def unhandled_error(self, error):
"""
Handles the unfriendly socket closures on the server side
without showing a confusing error message
"""
if hasattr(error, "errno") and error.errno == errno.ECONNRESET:
pass
else:
logging.getLogger("ws4py").exception("Failed to receive data")
class WebSocketClient(Communicator): # type: ignore[misc] class WebSocketClient(Communicator): # type: ignore[misc]
"""Frigate wrapper for ws client.""" """Frigate wrapper for ws client."""

View File

@@ -5,7 +5,6 @@ import json
import logging import logging
import os import os
from enum import Enum from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
@@ -17,9 +16,7 @@ from frigate.const import (
ALL_ATTRIBUTE_LABELS, ALL_ATTRIBUTE_LABELS,
AUDIO_MIN_CONFIDENCE, AUDIO_MIN_CONFIDENCE,
CACHE_DIR, CACHE_DIR,
CACHE_SEGMENT_FORMAT,
DEFAULT_DB_PATH, DEFAULT_DB_PATH,
MAX_PRE_CAPTURE,
REGEX_CAMERA_NAME, REGEX_CAMERA_NAME,
YAML_EXT, YAML_EXT,
) )
@@ -50,13 +47,6 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
# DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S" # DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
if os.path.isdir("/run/secrets"):
for secret_file in os.listdir("/run/secrets"):
if secret_file.startswith("FRIGATE_"):
FRIGATE_ENV_VARS[secret_file] = Path(
os.path.join("/run/secrets", secret_file)
).read_text()
DEFAULT_TRACKED_OBJECTS = ["person"] DEFAULT_TRACKED_OBJECTS = ["person"]
DEFAULT_LISTEN_AUDIO = ["bark", "fire_alarm", "scream", "speech", "yell"] DEFAULT_LISTEN_AUDIO = ["bark", "fire_alarm", "scream", "speech", "yell"]
@@ -181,13 +171,10 @@ class PtzAutotrackConfig(FrigateBaseModel):
timeout: int = Field( timeout: int = Field(
default=10, title="Seconds to delay before returning to preset." default=10, title="Seconds to delay before returning to preset."
) )
movement_weights: Optional[Union[str, List[str]]] = Field( movement_weights: Optional[Union[float, List[float]]] = Field(
default=[], default=[],
title="Internal value used for PTZ movements based on the speed of your camera's motor.", title="Internal value used for PTZ movements based on the speed of your camera's motor.",
) )
enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of autotracking."
)
@validator("movement_weights", pre=True) @validator("movement_weights", pre=True)
def validate_weights(cls, v): def validate_weights(cls, v):
@@ -233,9 +220,7 @@ class RetainConfig(FrigateBaseModel):
class EventsConfig(FrigateBaseModel): class EventsConfig(FrigateBaseModel):
pre_capture: int = Field( pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE
)
post_capture: int = Field(default=5, title="Seconds to retain after event ends.") post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
required_zones: List[str] = Field( required_zones: List[str] = Field(
default_factory=list, default_factory=list,
@@ -262,8 +247,8 @@ class RecordExportConfig(FrigateBaseModel):
class RecordConfig(FrigateBaseModel): class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.") enabled: bool = Field(default=False, title="Enable record on all cameras.")
sync_recordings: bool = Field( sync_on_startup: bool = Field(
default=False, title="Sync recordings with disk on startup and once a day." default=False, title="Sync recordings with disk on startup."
) )
expire_interval: int = Field( expire_interval: int = Field(
default=60, default=60,
@@ -367,9 +352,6 @@ class DetectConfig(FrigateBaseModel):
default=5, title="Number of frames per second to process through detection." default=5, title="Number of frames per second to process through detection."
) )
enabled: bool = Field(default=True, title="Detection Enabled.") enabled: bool = Field(default=True, title="Detection Enabled.")
min_initialized: Optional[int] = Field(
title="Minimum number of consecutive hits for an object to be initialized by the tracker."
)
max_disappeared: Optional[int] = Field( max_disappeared: Optional[int] = Field(
title="Maximum number of frames the object can dissapear before detection ends." title="Maximum number of frames the object can dissapear before detection ends."
) )
@@ -746,9 +728,6 @@ class CameraConfig(FrigateBaseModel):
default=60, default=60,
title="How long to wait for the image with the highest confidence score.", title="How long to wait for the image with the highest confidence score.",
) )
webui_url: Optional[str] = Field(
title="URL to visit the camera directly from system page",
)
zones: Dict[str, ZoneConfig] = Field( zones: Dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration." default_factory=dict, title="Zone configuration."
) )
@@ -872,7 +851,7 @@ class CameraConfig(FrigateBaseModel):
ffmpeg_output_args = ( ffmpeg_output_args = (
record_args record_args
+ [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"] + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
+ ffmpeg_output_args + ffmpeg_output_args
) )
@@ -1164,11 +1143,6 @@ class FrigateConfig(FrigateBaseModel):
else DEFAULT_DETECT_DIMENSIONS["height"] else DEFAULT_DETECT_DIMENSIONS["height"]
) )
# Default min_initialized configuration
min_initialized = camera_config.detect.fps / 2
if camera_config.detect.min_initialized is None:
camera_config.detect.min_initialized = min_initialized
# Default max_disappeared configuration # Default max_disappeared configuration
max_disappeared = camera_config.detect.fps * 5 max_disappeared = camera_config.detect.fps * 5
if camera_config.detect.max_disappeared is None: if camera_config.detect.max_disappeared is None:
@@ -1197,9 +1171,6 @@ class FrigateConfig(FrigateBaseModel):
# set config pre-value # set config pre-value
camera_config.record.enabled_in_config = camera_config.record.enabled camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.audio.enabled_in_config = camera_config.audio.enabled camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.onvif.autotracking.enabled_in_config = (
camera_config.onvif.autotracking.enabled
)
# Add default filters # Add default filters
object_keys = camera_config.objects.track object_keys = camera_config.objects.track

View File

@@ -50,10 +50,7 @@ DRIVER_INTEL_iHD = "iHD"
# Record Values # Record Values
CACHE_SEGMENT_FORMAT = "%Y%m%d%H%M%S%z"
MAX_PRE_CAPTURE = 60
MAX_SEGMENT_DURATION = 600 MAX_SEGMENT_DURATION = 600
MAX_SEGMENTS_IN_CACHE = 6
MAX_PLAYLIST_SECONDS = 7200 # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times MAX_PLAYLIST_SECONDS = 7200 # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
# Internal Comms Topics # Internal Comms Topics
@@ -63,7 +60,7 @@ REQUEST_REGION_GRID = "request_region_grid"
# Autotracking # Autotracking
AUTOTRACKING_MAX_AREA_RATIO = 0.6 AUTOTRACKING_MAX_AREA_RATIO = 0.5
AUTOTRACKING_MOTION_MIN_DISTANCE = 20 AUTOTRACKING_MOTION_MIN_DISTANCE = 20
AUTOTRACKING_MOTION_MAX_POINTS = 500 AUTOTRACKING_MOTION_MAX_POINTS = 500
AUTOTRACKING_MAX_MOVE_METRICS = 500 AUTOTRACKING_MAX_MOVE_METRICS = 500

View File

@@ -49,18 +49,12 @@ class DeepStack(DetectionApi):
image.save(output, format="JPEG") image.save(output, format="JPEG")
image_bytes = output.getvalue() image_bytes = output.getvalue()
data = {"api_key": self.api_key} data = {"api_key": self.api_key}
try:
response = requests.post( response = requests.post(
self.api_url, self.api_url,
data=data, data=data,
files={"image": image_bytes}, files={"image": image_bytes},
timeout=self.api_timeout, timeout=self.api_timeout,
) )
except requests.exceptions.RequestException:
logger.error("Error calling deepstack API")
return np.zeros((20, 6), np.float32)
response_json = response.json() response_json = response.json()
detections = np.zeros((20, 6), np.float32) detections = np.zeros((20, 6), np.float32)
if response_json.get("predictions") is None: if response_json.get("predictions") is None:

View File

@@ -1,205 +0,0 @@
import logging
import os.path
import urllib.request
from typing import Literal
import numpy as np
try:
from hide_warnings import hide_warnings
except: # noqa: E722
def hide_warnings(func):
pass
from pydantic import Field
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
logger = logging.getLogger(__name__)
DETECTOR_KEY = "rknn"
supported_socs = ["rk3562", "rk3566", "rk3568", "rk3588"]
yolov8_suffix = {
"default-yolov8n": "n",
"default-yolov8s": "s",
"default-yolov8m": "m",
"default-yolov8l": "l",
"default-yolov8x": "x",
}
class RknnDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
core_mask: int = Field(default=0, ge=0, le=7, title="Core mask for NPU.")
class Rknn(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, config: RknnDetectorConfig):
# create symlink for Home Assistant add on
if not os.path.isfile("/proc/device-tree/compatible"):
if os.path.isfile("/device-tree/compatible"):
os.symlink("/device-tree/compatible", "/proc/device-tree/compatible")
# find out SoC
try:
with open("/proc/device-tree/compatible") as file:
soc = file.read().split(",")[-1].strip("\x00")
except FileNotFoundError:
logger.error("Make sure to run docker in privileged mode.")
raise Exception("Make sure to run docker in privileged mode.")
if soc not in supported_socs:
logger.error(
"Your SoC is not supported. Your SoC is: {}. Currently these SoCs are supported: {}.".format(
soc, supported_socs
)
)
raise Exception(
"Your SoC is not supported. Your SoC is: {}. Currently these SoCs are supported: {}.".format(
soc, supported_socs
)
)
if not os.path.isfile("/usr/lib/librknnrt.so"):
if "rk356" in soc:
os.rename("/usr/lib/librknnrt_rk356x.so", "/usr/lib/librknnrt.so")
elif "rk3588" in soc:
os.rename("/usr/lib/librknnrt_rk3588.so", "/usr/lib/librknnrt.so")
self.model_path = config.model.path or "default-yolov8n"
self.core_mask = config.core_mask
self.height = config.model.height
self.width = config.model.width
if self.model_path in yolov8_suffix:
if self.model_path == "default-yolov8n":
self.model_path = "/models/rknn/yolov8n-320x320-{soc}.rknn".format(
soc=soc
)
else:
model_suffix = yolov8_suffix[self.model_path]
self.model_path = (
"/config/model_cache/rknn/yolov8{suffix}-320x320-{soc}.rknn".format(
suffix=model_suffix, soc=soc
)
)
os.makedirs("/config/model_cache/rknn", exist_ok=True)
if not os.path.isfile(self.model_path):
logger.info(
"Downloading yolov8{suffix} model.".format(suffix=model_suffix)
)
urllib.request.urlretrieve(
"https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-{soc}/yolov8{suffix}-320x320-{soc}.rknn".format(
soc=soc, suffix=model_suffix
),
self.model_path,
)
if (config.model.width != 320) or (config.model.height != 320):
logger.error(
"Make sure to set the model width and heigth to 320 in your config.yml."
)
raise Exception(
"Make sure to set the model width and heigth to 320 in your config.yml."
)
if config.model.input_pixel_format != "bgr":
logger.error(
'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
)
raise Exception(
'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
)
if config.model.input_tensor != "nhwc":
logger.error(
'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
)
raise Exception(
'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
)
from rknnlite.api import RKNNLite
self.rknn = RKNNLite(verbose=False)
if self.rknn.load_rknn(self.model_path) != 0:
logger.error("Error initializing rknn model.")
if self.rknn.init_runtime(core_mask=self.core_mask) != 0:
logger.error(
"Error initializing rknn runtime. Do you run docker in privileged mode?"
)
def __del__(self):
self.rknn.release()
def postprocess(self, results):
"""
Processes yolov8 output.
Args:
results: array with shape: (1, 84, n, 1) where n depends on yolov8 model size (for 320x320 model n=2100)
Returns:
detections: array with shape (20, 6) with 20 rows of (class, confidence, y_min, x_min, y_max, x_max)
"""
results = np.transpose(results[0, :, :, 0]) # array shape (2100, 84)
scores = np.max(
results[:, 4:], axis=1
) # array shape (2100,); max confidence of each row
# remove lines with score scores < 0.4
filtered_arg = np.argwhere(scores > 0.4)
results = results[filtered_arg[:, 0]]
scores = scores[filtered_arg[:, 0]]
num_detections = len(scores)
if num_detections == 0:
return np.zeros((20, 6), np.float32)
if num_detections > 20:
top_arg = np.argpartition(scores, -20)[-20:]
results = results[top_arg]
scores = scores[top_arg]
num_detections = 20
classes = np.argmax(results[:, 4:], axis=1)
boxes = np.transpose(
np.vstack(
(
(results[:, 1] - 0.5 * results[:, 3]) / self.height,
(results[:, 0] - 0.5 * results[:, 2]) / self.width,
(results[:, 1] + 0.5 * results[:, 3]) / self.height,
(results[:, 0] + 0.5 * results[:, 2]) / self.width,
)
)
)
detections = np.zeros((20, 6), np.float32)
detections[:num_detections, 0] = classes
detections[:num_detections, 1] = scores
detections[:num_detections, 2:] = boxes
return detections
@hide_warnings
def inference(self, tensor_input):
return self.rknn.inference(inputs=tensor_input)
def detect_raw(self, tensor_input):
output = self.inference(
[
tensor_input,
]
)
return self.postprocess(output[0])

View File

@@ -303,7 +303,6 @@ class TensorRtDetector(DetectionApi):
ordered[:, 3] = np.clip(ordered[:, 3] + ordered[:, 1], 0, 1) ordered[:, 3] = np.clip(ordered[:, 3] + ordered[:, 1], 0, 1)
# put result into the correct order and limit to top 20 # put result into the correct order and limit to top 20
detections = ordered[:, [5, 4, 1, 0, 3, 2]][:20] detections = ordered[:, [5, 4, 1, 0, 3, 2]][:20]
# pad to 20x6 shape # pad to 20x6 shape
append_cnt = 20 - len(detections) append_cnt = 20 - len(detections)
if append_cnt > 0: if append_cnt > 0:

View File

@@ -43,9 +43,9 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0] ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]
input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + ( input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + (
parse_preset_input(ffmpeg_input.input_args, 1) parse_preset_input(ffmpeg_input.input_args, 1)
or get_ffmpeg_arg_list(ffmpeg_input.input_args) or ffmpeg_input.input_args
or parse_preset_input(ffmpeg.input_args, 1) or parse_preset_input(ffmpeg.input_args, 1)
or get_ffmpeg_arg_list(ffmpeg.input_args) or ffmpeg.input_args
) )
return ( return (
["ffmpeg", "-vn"] ["ffmpeg", "-vn"]
@@ -240,10 +240,7 @@ class AudioEventMaintainer(threading.Thread):
rms = np.sqrt(np.mean(np.absolute(np.square(audio_as_float)))) rms = np.sqrt(np.mean(np.absolute(np.square(audio_as_float))))
# Transform RMS to dBFS (decibels relative to full scale) # Transform RMS to dBFS (decibels relative to full scale)
if rms > 0:
dBFS = 20 * np.log10(np.abs(rms) / AUDIO_MAX_BIT_RANGE) dBFS = 20 * np.log10(np.abs(rms) / AUDIO_MAX_BIT_RANGE)
else:
dBFS = 0
self.inter_process_communicator.queue.put( self.inter_process_communicator.queue.put(
(f"{self.config.name}/audio/dBFS", float(dBFS)) (f"{self.config.name}/audio/dBFS", float(dBFS))

View File

@@ -106,10 +106,10 @@ class ExternalEventProcessor:
# write jpg snapshot with optional annotations # write jpg snapshot with optional annotations
if draw.get("boxes") and isinstance(draw.get("boxes"), list): if draw.get("boxes") and isinstance(draw.get("boxes"), list):
for box in draw.get("boxes"): for box in draw.get("boxes"):
x = int(box["box"][0] * camera_config.detect.width) x = box["box"][0] * camera_config.detect.width
y = int(box["box"][1] * camera_config.detect.height) y = box["box"][1] * camera_config.detect.height
width = int(box["box"][2] * camera_config.detect.width) width = box["box"][2] * camera_config.detect.width
height = int(box["box"][3] * camera_config.detect.height) height = box["box"][3] * camera_config.detect.height
draw_box_with_label( draw_box_with_label(
img_frame, img_frame,

View File

@@ -55,8 +55,8 @@ _user_agent_args = [
] ]
PRESETS_HW_ACCEL_DECODE = { PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-32-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m", "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
"preset-vaapi": f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi", "preset-vaapi": f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv", "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv", "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
@@ -65,28 +65,24 @@ PRESETS_HW_ACCEL_DECODE = {
"preset-nvidia-mjpeg": "-hwaccel cuda -hwaccel_output_format cuda", "preset-nvidia-mjpeg": "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}", "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}", "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
"preset-rk-h264": "-c:v h264_rkmpp_decoder",
"preset-rk-h265": "-c:v hevc_rkmpp_decoder",
} }
PRESETS_HW_ACCEL_SCALE = { PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}", "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}", "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-jetson-h264": "-r {0}", # scaled in decoder "preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder "preset-jetson-h265": "-r {0}", # scaled in decoder
"preset-rk-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rk-h265": "-r {0} -vf fps={0},scale={1}:{2}",
"default": "-r {0} -vf fps={0},scale={1}:{2}", "default": "-r {0} -vf fps={0},scale={1}:{2}",
} }
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = { PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
"preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}", "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m {1}",
"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}", "preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
"preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}", "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}", "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
@@ -94,14 +90,12 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
"preset-nvidia-h265": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}", "preset-nvidia-h265": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}",
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}", "default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
} }
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = { PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}", "preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m -pix_fmt yuv420p {1}", "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}", "preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
"preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}", "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}", "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
@@ -109,8 +103,6 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}", "preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}", "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}", "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}",
"preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}",
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}", "default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}",
} }

View File

@@ -4,7 +4,6 @@ import glob
import json import json
import logging import logging
import os import os
import re
import subprocess as sp import subprocess as sp
import time import time
import traceback import traceback
@@ -16,7 +15,6 @@ from urllib.parse import unquote
import cv2 import cv2
import numpy as np import numpy as np
import pytz import pytz
import requests
from flask import ( from flask import (
Blueprint, Blueprint,
Flask, Flask,
@@ -43,7 +41,7 @@ from frigate.const import (
RECORD_DIR, RECORD_DIR,
) )
from frigate.events.external import ExternalEventProcessor from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, Recordings, Regions, Timeline from frigate.models import Event, Recordings, Timeline
from frigate.object_processing import TrackedObject from frigate.object_processing import TrackedObject
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.ptz.onvif import OnvifController from frigate.ptz.onvif import OnvifController
@@ -117,7 +115,7 @@ def is_healthy():
@bp.route("/events/summary") @bp.route("/events/summary")
def events_summary(): def events_summary():
tz_name = request.args.get("timezone", default="utc", type=str) tz_name = request.args.get("timezone", default="utc", type=str)
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name) hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
has_clip = request.args.get("has_clip", type=int) has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int) has_snapshot = request.args.get("has_snapshot", type=int)
@@ -151,7 +149,12 @@ def events_summary():
Event.camera, Event.camera,
Event.label, Event.label,
Event.sub_label, Event.sub_label,
(Event.start_time + seconds_offset).cast("int") / (3600 * 24), fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
),
Event.zones, Event.zones,
) )
) )
@@ -723,126 +726,6 @@ def label_snapshot(camera_name, label):
return response return response
@bp.route("/<camera_name>/grid.jpg")
def grid_snapshot(camera_name):
request.args.get("type", default="region")
if camera_name in current_app.frigate_config.cameras:
detect = current_app.frigate_config.cameras[camera_name].detect
frame = current_app.detected_frames_processor.get_current_frame(camera_name, {})
retry_interval = float(
current_app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10
)
if frame is None or datetime.now().timestamp() > (
current_app.detected_frames_processor.get_current_frame_time(camera_name)
+ retry_interval
):
return make_response(
jsonify({"success": False, "message": "Unable to get valid frame"}),
500,
)
try:
grid = (
Regions.select(Regions.grid)
.where(Regions.camera == camera_name)
.get()
.grid
)
except DoesNotExist:
return make_response(
jsonify({"success": False, "message": "Unable to get region grid"}),
500,
)
color_arg = request.args.get("color", default="", type=str).lower()
draw_font_scale = request.args.get("font_scale", default=0.5, type=float)
if color_arg == "red":
draw_color = (0, 0, 255)
elif color_arg == "blue":
draw_color = (255, 0, 0)
elif color_arg == "black":
draw_color = (0, 0, 0)
elif color_arg == "white":
draw_color = (255, 255, 255)
else:
draw_color = (0, 255, 0)
grid_size = len(grid)
grid_coef = 1.0 / grid_size
width = detect.width
height = detect.height
for x in range(grid_size):
for y in range(grid_size):
cell = grid[x][y]
if len(cell["sizes"]) == 0:
continue
std_dev = round(cell["std_dev"] * width, 2)
mean = round(cell["mean"] * width, 2)
cv2.rectangle(
frame,
(int(x * grid_coef * width), int(y * grid_coef * height)),
(
int((x + 1) * grid_coef * width),
int((y + 1) * grid_coef * height),
),
draw_color,
2,
)
cv2.putText(
frame,
f"#: {len(cell['sizes'])}",
(
int(x * grid_coef * width + 10),
int((y * grid_coef + 0.02) * height),
),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale=draw_font_scale,
color=draw_color,
thickness=2,
)
cv2.putText(
frame,
f"std: {std_dev}",
(
int(x * grid_coef * width + 10),
int((y * grid_coef + 0.05) * height),
),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale=draw_font_scale,
color=draw_color,
thickness=2,
)
cv2.putText(
frame,
f"avg: {mean}",
(
int(x * grid_coef * width + 10),
int((y * grid_coef + 0.08) * height),
),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale=draw_font_scale,
color=draw_color,
thickness=2,
)
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers["Content-Type"] = "image/jpeg"
response.headers["Cache-Control"] = "no-store"
return response
else:
return make_response(
jsonify({"success": False, "message": "Camera not found"}),
404,
)
@bp.route("/events/<id>/clip.mp4") @bp.route("/events/<id>/clip.mp4")
def event_clip(id): def event_clip(id):
download = request.args.get("download", type=bool) download = request.args.get("download", type=bool)
@@ -877,7 +760,7 @@ def event_clip(id):
response.headers["Content-Length"] = os.path.getsize(clip_path) response.headers["Content-Length"] = os.path.getsize(clip_path)
response.headers[ response.headers[
"X-Accel-Redirect" "X-Accel-Redirect"
] = f"/clips/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers ] = f"/clips/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
return response return response
@@ -1006,7 +889,7 @@ def events():
if time_range != DEFAULT_TIME_RANGE: if time_range != DEFAULT_TIME_RANGE:
# get timezone arg to ensure browser times are used # get timezone arg to ensure browser times are used
tz_name = request.args.get("timezone", default="utc", type=str) tz_name = request.args.get("timezone", default="utc", type=str)
hour_modifier, minute_modifier, _ = get_tz_modifiers(tz_name) hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
times = time_range.split(",") times = time_range.split(",")
time_after = times[0] time_after = times[0]
@@ -1063,7 +946,7 @@ def events():
if is_submitted is not None: if is_submitted is not None:
if is_submitted == 0: if is_submitted == 0:
clauses.append((Event.plus_id.is_null())) clauses.append((Event.plus_id.is_null()))
elif is_submitted > 0: else:
clauses.append((Event.plus_id != "")) clauses.append((Event.plus_id != ""))
if len(clauses) == 0: if len(clauses) == 0:
@@ -1155,9 +1038,6 @@ def end_event(event_id):
def config(): def config():
config = current_app.frigate_config.dict() config = current_app.frigate_config.dict()
# remove the mqtt password
config["mqtt"].pop("password", None)
for camera_name, camera in current_app.frigate_config.cameras.items(): for camera_name, camera in current_app.frigate_config.cameras.items():
camera_dict = config["cameras"][camera_name] camera_dict = config["cameras"][camera_name]
@@ -1346,22 +1226,6 @@ def config_schema():
) )
@bp.route("/go2rtc/streams")
def go2rtc_streams():
r = requests.get("http://127.0.0.1:1984/api/streams")
if not r.ok:
logger.error("Failed to fetch streams from go2rtc")
return make_response(
jsonify({"success": False, "message": "Error fetching stream data"}),
500,
)
stream_data = r.json()
for data in stream_data.values():
for producer in data.get("producers", []):
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
return jsonify(stream_data)
@bp.route("/version") @bp.route("/version")
def version(): def version():
return VERSION return VERSION
@@ -1524,8 +1388,6 @@ def get_snapshot_from_recording(camera_name: str, frame_time: str):
) )
) )
.where(Recordings.camera == camera_name) .where(Recordings.camera == camera_name)
.order_by(Recordings.start_time.desc())
.limit(1)
) )
try: try:
@@ -1599,7 +1461,7 @@ def get_recordings_storage_usage():
@bp.route("/<camera_name>/recordings/summary") @bp.route("/<camera_name>/recordings/summary")
def recordings_summary(camera_name): def recordings_summary(camera_name):
tz_name = request.args.get("timezone", default="utc", type=str) tz_name = request.args.get("timezone", default="utc", type=str)
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name) hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
recording_groups = ( recording_groups = (
Recordings.select( Recordings.select(
fn.strftime( fn.strftime(
@@ -1613,8 +1475,22 @@ def recordings_summary(camera_name):
fn.SUM(Recordings.objects).alias("objects"), fn.SUM(Recordings.objects).alias("objects"),
) )
.where(Recordings.camera == camera_name) .where(Recordings.camera == camera_name)
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600) .group_by(
.order_by(Recordings.start_time.desc()) fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
)
)
.order_by(
fn.strftime(
"%Y-%m-%d H",
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
).desc()
)
.namedtuples() .namedtuples()
) )
@@ -1629,7 +1505,14 @@ def recordings_summary(camera_name):
fn.COUNT(Event.id).alias("count"), fn.COUNT(Event.id).alias("count"),
) )
.where(Event.camera == camera_name, Event.has_clip) .where(Event.camera == camera_name, Event.has_clip)
.group_by((Event.start_time + seconds_offset).cast("int") / 3600) .group_by(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
),
)
.namedtuples() .namedtuples()
) )
@@ -1773,7 +1656,7 @@ def recording_clip(camera_name, start_ts, end_ts):
response.headers["Content-Length"] = os.path.getsize(path) response.headers["Content-Length"] = os.path.getsize(path)
response.headers[ response.headers[
"X-Accel-Redirect" "X-Accel-Redirect"
] = f"/cache/{file_name}" # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers ] = f"/cache/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
return response return response
@@ -1975,68 +1858,9 @@ def export_recording(camera_name: str, start_time, end_time):
) )
def export_filename_check_extension(filename: str):
if filename.endswith(".mp4"):
return filename
else:
return filename + ".mp4"
def export_filename_is_valid(filename: str):
if re.search(r"[^:_A-Za-z0-9]", filename) or filename.startswith("in_progress."):
return False
else:
return True
@bp.route("/export/<file_name_current>/<file_name_new>", methods=["PATCH"])
def export_rename(file_name_current, file_name_new: str):
safe_file_name_current = secure_filename(
export_filename_check_extension(file_name_current)
)
file_current = os.path.join(EXPORT_DIR, safe_file_name_current)
if not os.path.exists(file_current):
return make_response(
jsonify({"success": False, "message": f"{file_name_current} not found."}),
404,
)
if not export_filename_is_valid(file_name_new):
return make_response(
jsonify(
{
"success": False,
"message": f"{file_name_new} contains illegal characters.",
}
),
400,
)
safe_file_name_new = secure_filename(export_filename_check_extension(file_name_new))
file_new = os.path.join(EXPORT_DIR, safe_file_name_new)
if os.path.exists(file_new):
return make_response(
jsonify({"success": False, "message": f"{file_name_new} already exists."}),
400,
)
os.rename(file_current, file_new)
return make_response(
jsonify(
{
"success": True,
"message": "Successfully renamed file.",
}
),
200,
)
@bp.route("/export/<file_name>", methods=["DELETE"]) @bp.route("/export/<file_name>", methods=["DELETE"])
def export_delete(file_name: str): def export_delete(file_name: str):
safe_file_name = secure_filename(export_filename_check_extension(file_name)) safe_file_name = secure_filename(file_name)
file = os.path.join(EXPORT_DIR, safe_file_name) file = os.path.join(EXPORT_DIR, safe_file_name)
if not os.path.exists(file): if not os.path.exists(file):
@@ -2171,30 +1995,3 @@ def logs(service: str):
jsonify({"success": False, "message": "Could not find log file"}), jsonify({"success": False, "message": "Could not find log file"}),
500, 500,
) )
@bp.route("/restart", methods=["POST"])
def restart():
try:
restart_frigate()
except Exception as e:
logging.error(f"Error restarting Frigate: {e}")
return make_response(
jsonify(
{
"success": False,
"message": "Unable to restart Frigate.",
}
),
500,
)
return make_response(
jsonify(
{
"success": True,
"message": "Restarting (this can take up to one minute)...",
}
),
200,
)

View File

@@ -128,6 +128,9 @@ class TrackedObject:
self.frame = None self.frame = None
self.previous = self.to_dict() self.previous = self.to_dict()
# start the score history
self.score_history = [self.obj_data["score"]]
def _is_false_positive(self): def _is_false_positive(self):
# once a true positive, always a true positive # once a true positive, always a true positive
if not self.false_positive: if not self.false_positive:
@@ -195,7 +198,7 @@ class TrackedObject:
self.zone_presence[name] = zone_score + 1 self.zone_presence[name] = zone_score + 1
# an object is only considered present in a zone if it has a zone inertia of 3+ # an object is only considered present in a zone if it has a zone inertia of 3+
if self.zone_presence[name] >= zone.inertia: if zone_score >= zone.inertia:
current_zones.append(name) current_zones.append(name)
if name not in self.entered_zones: if name not in self.entered_zones:
@@ -245,8 +248,10 @@ class TrackedObject:
if self.obj_data["frame_time"] - self.previous["frame_time"] > 60: if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
significant_change = True significant_change = True
# update autotrack at most 3 objects per second # update autotrack at half fps
if self.obj_data["frame_time"] - self.previous["frame_time"] >= (1 / 3): if self.obj_data["frame_time"] - self.previous["frame_time"] > (
1 / (self.camera_config.detect.fps / 2)
):
autotracker_update = True autotracker_update = True
self.obj_data.update(obj_data) self.obj_data.update(obj_data)

View File

@@ -20,8 +20,8 @@ from ws4py.server.wsgirefserver import (
WSGIServer, WSGIServer,
) )
from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.websocket import WebSocket
from frigate.comms.ws import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.types import CameraMetricsTypes from frigate.types import CameraMetricsTypes
@@ -45,7 +45,6 @@ def get_standard_aspect_ratio(width: int, height: int) -> tuple[int, int]:
(32, 9), # panoramic cameras (32, 9), # panoramic cameras
(12, 9), (12, 9),
(9, 12), (9, 12),
(22, 15), # Amcrest, NTSC DVT
] # aspects are scaled to have common relative size ] # aspects are scaled to have common relative size
known_aspects_ratios = list( known_aspects_ratios = list(
map(lambda aspect: aspect[0] / aspect[1], known_aspects) map(lambda aspect: aspect[0] / aspect[1], known_aspects)
@@ -64,8 +63,8 @@ def get_canvas_shape(width: int, height: int) -> tuple[int, int]:
a_w, a_h = get_standard_aspect_ratio(width, height) a_w, a_h = get_standard_aspect_ratio(width, height)
if round(a_w / a_h, 2) != round(width / height, 2): if round(a_w / a_h, 2) != round(width / height, 2):
canvas_width = int(width // 4 * 4) canvas_width = width
canvas_height = int((canvas_width / a_w * a_h) // 4 * 4) canvas_height = int((canvas_width / a_w) * a_h)
logger.warning( logger.warning(
f"The birdseye resolution is a non-standard aspect ratio, forcing birdseye resolution to {canvas_width} x {canvas_height}" f"The birdseye resolution is a non-standard aspect ratio, forcing birdseye resolution to {canvas_width} x {canvas_height}"
) )
@@ -109,12 +108,9 @@ class Canvas:
return camera_aspect return camera_aspect
class FFMpegConverter(threading.Thread): class FFMpegConverter:
def __init__( def __init__(
self, self,
camera: str,
input_queue: queue.Queue,
stop_event: mp.Event,
in_width: int, in_width: int,
in_height: int, in_height: int,
out_width: int, out_width: int,
@@ -122,11 +118,6 @@ class FFMpegConverter(threading.Thread):
quality: int, quality: int,
birdseye_rtsp: bool = False, birdseye_rtsp: bool = False,
): ):
threading.Thread.__init__(self)
self.name = f"{camera}_output_converter"
self.camera = camera
self.input_queue = input_queue
self.stop_event = stop_event
self.bd_pipe = None self.bd_pipe = None
if birdseye_rtsp: if birdseye_rtsp:
@@ -176,7 +167,7 @@ class FFMpegConverter(threading.Thread):
os.close(stdin) os.close(stdin)
self.reading_birdseye = False self.reading_birdseye = False
def __write(self, b) -> None: def write(self, b) -> None:
self.process.stdin.write(b) self.process.stdin.write(b)
if self.bd_pipe: if self.bd_pipe:
@@ -212,25 +203,9 @@ class FFMpegConverter(threading.Thread):
self.process.kill() self.process.kill()
self.process.communicate() self.process.communicate()
def run(self) -> None:
while not self.stop_event.is_set():
try:
frame = self.input_queue.get(True, timeout=1)
self.__write(frame)
except queue.Empty:
pass
self.exit()
class BroadcastThread(threading.Thread): class BroadcastThread(threading.Thread):
def __init__( def __init__(self, camera, converter, websocket_server, stop_event):
self,
camera: str,
converter: FFMpegConverter,
websocket_server,
stop_event: mp.Event,
):
super(BroadcastThread, self).__init__() super(BroadcastThread, self).__init__()
self.camera = camera self.camera = camera
self.converter = converter self.converter = converter
@@ -488,7 +463,7 @@ class BirdsEyeFrameManager:
def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]: def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
"""Calculate the optimal layout for 2+ cameras.""" """Calculate the optimal layout for 2+ cameras."""
def map_layout(camera_layout: list[list[any]], row_height: int): def map_layout(row_height: int):
"""Map the calculated layout.""" """Map the calculated layout."""
candidate_layout = [] candidate_layout = []
starting_x = 0 starting_x = 0
@@ -517,7 +492,7 @@ class BirdsEyeFrameManager:
x + scaled_width > self.canvas.width x + scaled_width > self.canvas.width
or y + scaled_height > self.canvas.height or y + scaled_height > self.canvas.height
): ):
return x + scaled_width, y + scaled_height, None return 0, 0, None
final_row.append((cameras[0], (x, y, scaled_width, scaled_height))) final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width x += scaled_width
@@ -589,21 +564,7 @@ class BirdsEyeFrameManager:
return None return None
row_height = int(self.canvas.height / coefficient) row_height = int(self.canvas.height / coefficient)
total_width, total_height, standard_candidate_layout = map_layout( total_width, total_height, standard_candidate_layout = map_layout(row_height)
camera_layout, row_height
)
if not standard_candidate_layout:
# if standard layout didn't work
# try reducing row_height by the % overflow
scale_down_percent = max(
total_width / self.canvas.width,
total_height / self.canvas.height,
)
row_height = int(row_height / scale_down_percent)
total_width, total_height, standard_candidate_layout = map_layout(
camera_layout, row_height
)
if not standard_candidate_layout: if not standard_candidate_layout:
return None return None
@@ -617,7 +578,7 @@ class BirdsEyeFrameManager:
1 / (total_height / self.canvas.height), 1 / (total_height / self.canvas.height),
) )
row_height = int(row_height * scale_up_percent) row_height = int(row_height * scale_up_percent)
_, _, scaled_layout = map_layout(camera_layout, row_height) _, _, scaled_layout = map_layout(row_height)
if scaled_layout: if scaled_layout:
return scaled_layout return scaled_layout
@@ -703,20 +664,15 @@ def output_frames(
websocket_server.initialize_websockets_manager() websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever) websocket_thread = threading.Thread(target=websocket_server.serve_forever)
inputs: dict[str, queue.Queue] = {}
converters = {} converters = {}
broadcasters = {} broadcasters = {}
for camera, cam_config in config.cameras.items(): for camera, cam_config in config.cameras.items():
inputs[camera] = queue.Queue(maxsize=cam_config.detect.fps)
width = int( width = int(
cam_config.live.height cam_config.live.height
* (cam_config.frame_shape[1] / cam_config.frame_shape[0]) * (cam_config.frame_shape[1] / cam_config.frame_shape[0])
) )
converters[camera] = FFMpegConverter( converters[camera] = FFMpegConverter(
camera,
inputs[camera],
stop_event,
cam_config.frame_shape[1], cam_config.frame_shape[1],
cam_config.frame_shape[0], cam_config.frame_shape[0],
width, width,
@@ -728,11 +684,7 @@ def output_frames(
) )
if config.birdseye.enabled: if config.birdseye.enabled:
inputs["birdseye"] = queue.Queue(maxsize=10)
converters["birdseye"] = FFMpegConverter( converters["birdseye"] = FFMpegConverter(
"birdseye",
inputs["birdseye"],
stop_event,
config.birdseye.width, config.birdseye.width,
config.birdseye.height, config.birdseye.height,
config.birdseye.width, config.birdseye.width,
@@ -749,9 +701,6 @@ def output_frames(
websocket_thread.start() websocket_thread.start()
for t in converters.values():
t.start()
for t in broadcasters.values(): for t in broadcasters.values():
t.start() t.start()
@@ -786,11 +735,7 @@ def output_frames(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
): ):
# write to the converter for the camera if clients are listening to the specific camera # write to the converter for the camera if clients are listening to the specific camera
try: converters[camera].write(frame.tobytes())
inputs[camera].put_nowait(frame.tobytes())
except queue.Full:
# drop frames if queue is full
pass
if config.birdseye.enabled and ( if config.birdseye.enabled and (
config.birdseye.restream config.birdseye.restream
@@ -811,11 +756,7 @@ def output_frames(
if config.birdseye.restream: if config.birdseye.restream:
birdseye_buffer[:] = frame_bytes birdseye_buffer[:] = frame_bytes
try: converters["birdseye"].write(frame_bytes)
inputs["birdseye"].put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
if camera in previous_frames: if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}") frame_manager.delete(f"{camera}{previous_frames[camera]}")
@@ -835,9 +776,10 @@ def output_frames(
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv) frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.delete(frame_id) frame_manager.delete(frame_id)
for c in converters.values():
c.exit()
for b in broadcasters.values(): for b in broadcasters.values():
b.join() b.join()
websocket_server.manager.close_all() websocket_server.manager.close_all()
websocket_server.manager.stop() websocket_server.manager.stop()
websocket_server.manager.join() websocket_server.manager.join()

View File

@@ -3,7 +3,6 @@ import json
import logging import logging
import os import os
import re import re
from pathlib import Path
from typing import Any, List from typing import Any, List
import cv2 import cv2
@@ -37,10 +36,6 @@ class PlusApi:
self.key = None self.key = None
if PLUS_ENV_VAR in os.environ: if PLUS_ENV_VAR in os.environ:
self.key = os.environ.get(PLUS_ENV_VAR) self.key = os.environ.get(PLUS_ENV_VAR)
elif os.path.isdir("/run/secrets") and PLUS_ENV_VAR in os.listdir(
"/run/secrets"
):
self.key = Path(os.path.join("/run/secrets", PLUS_ENV_VAR)).read_text()
# check for the addon options file # check for the addon options file
elif os.path.isfile("/data/options.json"): elif os.path.isfile("/data/options.json"):
with open("/data/options.json") as f: with open("/data/options.json") as f:

View File

@@ -18,7 +18,6 @@ from norfair.camera_motion import (
TranslationTransformationGetter, TranslationTransformationGetter,
) )
from frigate.comms.dispatcher import Dispatcher
from frigate.config import CameraConfig, FrigateConfig, ZoomingModeEnum from frigate.config import CameraConfig, FrigateConfig, ZoomingModeEnum
from frigate.const import ( from frigate.const import (
AUTOTRACKING_MAX_AREA_RATIO, AUTOTRACKING_MAX_AREA_RATIO,
@@ -145,14 +144,11 @@ class PtzAutoTrackerThread(threading.Thread):
config: FrigateConfig, config: FrigateConfig,
onvif: OnvifController, onvif: OnvifController,
ptz_metrics: dict[str, PTZMetricsTypes], ptz_metrics: dict[str, PTZMetricsTypes],
dispatcher: Dispatcher,
stop_event: MpEvent, stop_event: MpEvent,
) -> None: ) -> None:
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.name = "ptz_autotracker" self.name = "ptz_autotracker"
self.ptz_autotracker = PtzAutoTracker( self.ptz_autotracker = PtzAutoTracker(config, onvif, ptz_metrics)
config, onvif, ptz_metrics, dispatcher, stop_event
)
self.stop_event = stop_event self.stop_event = stop_event
self.config = config self.config = config
@@ -179,14 +175,10 @@ class PtzAutoTracker:
config: FrigateConfig, config: FrigateConfig,
onvif: OnvifController, onvif: OnvifController,
ptz_metrics: PTZMetricsTypes, ptz_metrics: PTZMetricsTypes,
dispatcher: Dispatcher,
stop_event: MpEvent,
) -> None: ) -> None:
self.config = config self.config = config
self.onvif = onvif self.onvif = onvif
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
self.dispatcher = dispatcher
self.stop_event = stop_event
self.tracked_object: dict[str, object] = {} self.tracked_object: dict[str, object] = {}
self.tracked_object_history: dict[str, object] = {} self.tracked_object_history: dict[str, object] = {}
self.tracked_object_metrics: dict[str, object] = {} self.tracked_object_metrics: dict[str, object] = {}
@@ -208,10 +200,7 @@ class PtzAutoTracker:
continue continue
self.autotracker_init[camera] = False self.autotracker_init[camera] = False
if ( if camera_config.onvif.autotracking.enabled:
camera_config.onvif.autotracking.enabled
and camera_config.onvif.autotracking.enabled_in_config
):
self._autotracker_setup(camera_config, camera) self._autotracker_setup(camera_config, camera)
def _autotracker_setup(self, camera_config, camera): def _autotracker_setup(self, camera_config, camera):
@@ -226,8 +215,8 @@ class PtzAutoTracker:
maxlen=round(camera_config.detect.fps * 1.5) maxlen=round(camera_config.detect.fps * 1.5)
) )
self.tracked_object_metrics[camera] = { self.tracked_object_metrics[camera] = {
"max_target_box": AUTOTRACKING_MAX_AREA_RATIO "max_target_box": 1
** (1 / self.zoom_factor[camera]) - (AUTOTRACKING_MAX_AREA_RATIO ** self.zoom_factor[camera])
} }
self.calibrating[camera] = False self.calibrating[camera] = False
@@ -238,40 +227,32 @@ class PtzAutoTracker:
self.move_queues[camera] = queue.Queue() self.move_queues[camera] = queue.Queue()
self.move_queue_locks[camera] = threading.Lock() self.move_queue_locks[camera] = threading.Lock()
# handle onvif constructor failing due to no connection
if camera not in self.onvif.cams:
logger.warning(
f"Disabling autotracking for {camera}: onvif connection failed"
)
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
return
if not self.onvif.cams[camera]["init"]: if not self.onvif.cams[camera]["init"]:
if not self.onvif._init_onvif(camera): if not self.onvif._init_onvif(camera):
logger.warning( logger.warning(f"Unable to initialize onvif for {camera}")
f"Disabling autotracking for {camera}: Unable to initialize onvif"
)
camera_config.onvif.autotracking.enabled = False camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
return return
if "pt-r-fov" not in self.onvif.cams[camera]["features"]: if "pt-r-fov" not in self.onvif.cams[camera]["features"]:
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
logger.warning( logger.warning(
f"Disabling autotracking for {camera}: FOV relative movement not supported" f"Disabling autotracking for {camera}: FOV relative movement not supported"
) )
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
return return
movestatus_supported = self.onvif.get_service_capabilities(camera) movestatus_supported = self.onvif.get_service_capabilities(camera)
if movestatus_supported is None or movestatus_supported.lower() != "true": if movestatus_supported is None or movestatus_supported.lower() != "true":
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
logger.warning( logger.warning(
f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported" f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
) )
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
return return
if self.onvif.cams[camera]["init"]: if self.onvif.cams[camera]["init"]:
@@ -287,10 +268,6 @@ class PtzAutoTracker:
if camera_config.onvif.autotracking.movement_weights: if camera_config.onvif.autotracking.movement_weights:
if len(camera_config.onvif.autotracking.movement_weights) == 5: if len(camera_config.onvif.autotracking.movement_weights) == 5:
camera_config.onvif.autotracking.movement_weights = [
float(val)
for val in camera_config.onvif.autotracking.movement_weights
]
self.ptz_metrics[camera][ self.ptz_metrics[camera][
"ptz_min_zoom" "ptz_min_zoom"
].value = camera_config.onvif.autotracking.movement_weights[0] ].value = camera_config.onvif.autotracking.movement_weights[0]
@@ -313,8 +290,6 @@ class PtzAutoTracker:
if camera_config.onvif.autotracking.calibrate_on_startup: if camera_config.onvif.autotracking.calibrate_on_startup:
self._calibrate_camera(camera) self._calibrate_camera(camera)
self.ptz_metrics[camera]["ptz_tracking_active"].clear()
self.dispatcher.publish(f"{camera}/ptz_autotracker/active", "OFF", retain=False)
self.autotracker_init[camera] = True self.autotracker_init[camera] = True
def _write_config(self, camera): def _write_config(self, camera):
@@ -359,7 +334,7 @@ class PtzAutoTracker:
1, 1,
) )
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
zoom_out_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value) zoom_out_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value)
@@ -370,7 +345,7 @@ class PtzAutoTracker:
1, 1,
) )
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
zoom_in_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value) zoom_in_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value)
@@ -388,7 +363,7 @@ class PtzAutoTracker:
1, 1,
) )
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
zoom_out_values.append( zoom_out_values.append(
@@ -404,7 +379,7 @@ class PtzAutoTracker:
1, 1,
) )
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
zoom_in_values.append( zoom_in_values.append(
@@ -427,10 +402,10 @@ class PtzAutoTracker:
self.config.cameras[camera].onvif.autotracking.return_preset.lower(), self.config.cameras[camera].onvif.autotracking.return_preset.lower(),
) )
self.ptz_metrics[camera]["ptz_reset"].set() self.ptz_metrics[camera]["ptz_reset"].set()
self.ptz_metrics[camera]["ptz_motor_stopped"].clear() self.ptz_metrics[camera]["ptz_stopped"].clear()
# Wait until the camera finishes moving # Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
for step in range(num_steps): for step in range(num_steps):
@@ -441,7 +416,7 @@ class PtzAutoTracker:
self.onvif._move_relative(camera, pan, tilt, 0, 1) self.onvif._move_relative(camera, pan, tilt, 0, 1)
# Wait until the camera finishes moving # Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
stop_time = time.time() stop_time = time.time()
@@ -459,10 +434,10 @@ class PtzAutoTracker:
self.config.cameras[camera].onvif.autotracking.return_preset.lower(), self.config.cameras[camera].onvif.autotracking.return_preset.lower(),
) )
self.ptz_metrics[camera]["ptz_reset"].set() self.ptz_metrics[camera]["ptz_reset"].set()
self.ptz_metrics[camera]["ptz_motor_stopped"].clear() self.ptz_metrics[camera]["ptz_stopped"].clear()
# Wait until the camera finishes moving # Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
logger.info( logger.info(
@@ -546,11 +521,7 @@ class PtzAutoTracker:
camera_height = camera_config.frame_shape[0] camera_height = camera_config.frame_shape[0]
# Extract areas and calculate weighted average # Extract areas and calculate weighted average
# grab the largest dimension of the bounding box and create a square from that areas = [obj["area"] for obj in self.tracked_object_history[camera]]
areas = [
max(obj["box"][2] - obj["box"][0], obj["box"][3] - obj["box"][1]) ** 2
for obj in self.tracked_object_history[camera]
]
filtered_areas = ( filtered_areas = (
remove_outliers(areas) remove_outliers(areas)
@@ -596,11 +567,8 @@ class PtzAutoTracker:
camera_config.frame_shape[1] camera_config.frame_shape[1]
camera_config.frame_shape[0] camera_config.frame_shape[0]
while not self.stop_event.is_set(): while True:
try: move_data = self.move_queues[camera].get()
move_data = self.move_queues[camera].get(True, 0.1)
except queue.Empty:
continue
with self.move_queue_locks[camera]: with self.move_queue_locks[camera]:
frame_time, pan, tilt, zoom = move_data frame_time, pan, tilt, zoom = move_data
@@ -630,9 +598,7 @@ class PtzAutoTracker:
self.onvif._move_relative(camera, pan, tilt, 0, 1) self.onvif._move_relative(camera, pan, tilt, 0, 1)
# Wait until the camera finishes moving # Wait until the camera finishes moving
while not self.ptz_metrics[camera][ while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
"ptz_motor_stopped"
].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
if ( if (
@@ -642,7 +608,7 @@ class PtzAutoTracker:
self.onvif._zoom_absolute(camera, zoom, 1) self.onvif._zoom_absolute(camera, zoom, 1)
# Wait until the camera finishes moving # Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
if self.config.cameras[camera].onvif.autotracking.movement_weights: if self.config.cameras[camera].onvif.autotracking.movement_weights:
@@ -720,20 +686,19 @@ class PtzAutoTracker:
camera_height = camera_config.frame_shape[0] camera_height = camera_config.frame_shape[0]
camera_fps = camera_config.detect.fps camera_fps = camera_config.detect.fps
# estimate_velocity is a numpy array of bbox top,left and bottom,right velocities
velocities = obj.obj_data["estimate_velocity"] velocities = obj.obj_data["estimate_velocity"]
logger.debug(f"{camera}: Velocities from norfair: {velocities}") logger.debug(f"{camera}: Velocities from norfair: {velocities}")
# if we are close enough to zero, return right away # if we are close enough to zero, return right away
if np.all(np.round(velocities) == 0): if np.all(np.round(velocities) == 0):
return True, np.zeros((4,)) return True, np.zeros((2, 2))
# Thresholds # Thresholds
x_mags_thresh = camera_width / camera_fps / 2 x_mags_thresh = camera_width / camera_fps / 2
y_mags_thresh = camera_height / camera_fps / 2 y_mags_thresh = camera_height / camera_fps / 2
dir_thresh = 0.93 dir_thresh = 0.93
delta_thresh = 20 delta_thresh = 12
var_thresh = 10 var_thresh = 5
# Check magnitude # Check magnitude
x_mags = np.abs(velocities[:, 0]) x_mags = np.abs(velocities[:, 0])
@@ -757,6 +722,7 @@ class PtzAutoTracker:
np.linalg.norm(velocities[0]) * np.linalg.norm(velocities[1]) np.linalg.norm(velocities[0]) * np.linalg.norm(velocities[1])
) )
dir_thresh = 0.6 if np.all(delta < delta_thresh / 2) else dir_thresh dir_thresh = 0.6 if np.all(delta < delta_thresh / 2) else dir_thresh
print(f"cosine sim: {cosine_sim}")
invalid_dirs = cosine_sim < dir_thresh invalid_dirs = cosine_sim < dir_thresh
# Combine # Combine
@@ -786,10 +752,10 @@ class PtzAutoTracker:
) )
) )
# invalid velocity # invalid velocity
return False, np.zeros((4,)) return False, np.zeros((2, 2))
else: else:
logger.debug(f"{camera}: Valid velocity ") logger.debug(f"{camera}: Valid velocity ")
return True, velocities.flatten() return True, np.mean(velocities, axis=0)
def _get_distance_threshold(self, camera, obj): def _get_distance_threshold(self, camera, obj):
# Returns true if Euclidean distance from object to center of frame is # Returns true if Euclidean distance from object to center of frame is
@@ -870,7 +836,7 @@ class PtzAutoTracker:
# ensure object is not moving quickly # ensure object is not moving quickly
below_velocity_threshold = np.all( below_velocity_threshold = np.all(
np.abs(average_velocity) np.abs(average_velocity)
< np.tile([velocity_threshold_x, velocity_threshold_y], 2) < np.array([velocity_threshold_x, velocity_threshold_y])
) or np.all(average_velocity == 0) ) or np.all(average_velocity == 0)
below_area_threshold = ( below_area_threshold = (
@@ -881,11 +847,21 @@ class PtzAutoTracker:
# introduce some hysteresis to prevent a yo-yo zooming effect # introduce some hysteresis to prevent a yo-yo zooming effect
zoom_out_hysteresis = ( zoom_out_hysteresis = (
self.tracked_object_metrics[camera]["target_box"] self.tracked_object_metrics[camera]["target_box"]
> (
self.tracked_object_metrics[camera]["original_target_box"]
* AUTOTRACKING_ZOOM_OUT_HYSTERESIS
)
or self.tracked_object_metrics[camera]["target_box"]
> self.tracked_object_metrics[camera]["max_target_box"] > self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_OUT_HYSTERESIS * AUTOTRACKING_ZOOM_OUT_HYSTERESIS
) )
zoom_in_hysteresis = ( zoom_in_hysteresis = (
self.tracked_object_metrics[camera]["target_box"] self.tracked_object_metrics[camera]["target_box"]
< (
self.tracked_object_metrics[camera]["original_target_box"]
* AUTOTRACKING_ZOOM_IN_HYSTERESIS
)
or self.tracked_object_metrics[camera]["target_box"]
< self.tracked_object_metrics[camera]["max_target_box"] < self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_IN_HYSTERESIS * AUTOTRACKING_ZOOM_IN_HYSTERESIS
) )
@@ -962,7 +938,7 @@ class PtzAutoTracker:
camera_height = camera_config.frame_shape[0] camera_height = camera_config.frame_shape[0]
camera_fps = camera_config.detect.fps camera_fps = camera_config.detect.fps
average_velocity = np.zeros((4,)) average_velocity = np.zeros((2, 2))
predicted_box = obj.obj_data["box"] predicted_box = obj.obj_data["box"]
centroid_x = obj.obj_data["centroid"][0] centroid_x = obj.obj_data["centroid"][0]
@@ -990,6 +966,7 @@ class PtzAutoTracker:
# this box could exceed the frame boundaries if velocity is high # this box could exceed the frame boundaries if velocity is high
# but we'll handle that in _enqueue_move() as two separate moves # but we'll handle that in _enqueue_move() as two separate moves
current_box = np.array(obj.obj_data["box"]) current_box = np.array(obj.obj_data["box"])
average_velocity = np.tile(average_velocity, 2)
predicted_box = ( predicted_box = (
current_box current_box
+ camera_fps * predicted_movement_time * average_velocity + camera_fps * predicted_movement_time * average_velocity
@@ -1033,10 +1010,7 @@ class PtzAutoTracker:
zoom = 0 zoom = 0
result = None result = None
current_zoom_level = self.ptz_metrics[camera]["ptz_zoom_level"].value current_zoom_level = self.ptz_metrics[camera]["ptz_zoom_level"].value
target_box = max( target_box = obj.obj_data["area"] / (camera_width * camera_height)
obj.obj_data["box"][2] - obj.obj_data["box"][0],
obj.obj_data["box"][3] - obj.obj_data["box"][1],
) ** 2 / (camera_width * camera_height)
# absolute zooming separately from pan/tilt # absolute zooming separately from pan/tilt
if camera_config.onvif.autotracking.zooming == ZoomingModeEnum.absolute: if camera_config.onvif.autotracking.zooming == ZoomingModeEnum.absolute:
@@ -1081,20 +1055,30 @@ class PtzAutoTracker:
) )
) is not None: ) is not None:
# zoom value # zoom value
ratio = ( limit = (
self.tracked_object_metrics[camera]["max_target_box"] self.tracked_object_metrics[camera]["original_target_box"]
/ self.tracked_object_metrics[camera]["target_box"] if self.tracked_object_metrics[camera]["target_box"]
< self.tracked_object_metrics[camera]["max_target_box"]
else self.tracked_object_metrics[camera]["max_target_box"]
) )
zoom = (ratio - 1) / (ratio + 1) zoom = (
logger.debug( 2
f'{camera}: limit: {self.tracked_object_metrics[camera]["max_target_box"]}, ratio: {ratio} zoom calculation: {zoom}' * (
limit
/ (
self.tracked_object_metrics[camera]["target_box"]
+ limit
) )
)
- 1
)
logger.debug(f"{camera}: Zoom calculation: {zoom}")
if not result: if not result:
# zoom out with special condition if zooming out because of velocity, edges, etc. # zoom out with special condition if zooming out because of velocity, edges, etc.
zoom = -(1 - zoom) if zoom > 0 else -(zoom * 2 + 1) zoom = -(1 - zoom) if zoom > 0 else -(zoom + 1)
if result: if result:
# zoom in # zoom in
zoom = 1 - zoom if zoom > 0 else (zoom * 2 + 1) zoom = 1 - zoom if zoom > 0 else (zoom + 1)
logger.debug(f"{camera}: Zooming: {result} Zoom amount: {zoom}") logger.debug(f"{camera}: Zooming: {result} Zoom amount: {zoom}")
@@ -1133,10 +1117,6 @@ class PtzAutoTracker:
logger.debug( logger.debug(
f"{camera}: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}" f"{camera}: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
) )
self.ptz_metrics[camera]["ptz_tracking_active"].set()
self.dispatcher.publish(
f"{camera}/ptz_autotracker/active", "ON", retain=False
)
self.tracked_object[camera] = obj self.tracked_object[camera] = obj
self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data)) self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data))
@@ -1219,8 +1199,8 @@ class PtzAutoTracker:
) )
self.tracked_object[camera] = None self.tracked_object[camera] = None
self.tracked_object_metrics[camera] = { self.tracked_object_metrics[camera] = {
"max_target_box": AUTOTRACKING_MAX_AREA_RATIO "max_target_box": 1
** (1 / self.zoom_factor[camera]) - (AUTOTRACKING_MAX_AREA_RATIO ** self.zoom_factor[camera])
} }
def camera_maintenance(self, camera): def camera_maintenance(self, camera):
@@ -1239,7 +1219,7 @@ class PtzAutoTracker:
if not self.autotracker_init[camera]: if not self.autotracker_init[camera]:
self._autotracker_setup(self.config.cameras[camera], camera) self._autotracker_setup(self.config.cameras[camera], camera)
# regularly update camera status # regularly update camera status
if not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): if not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
# return to preset if tracking is over # return to preset if tracking is over
@@ -1262,7 +1242,7 @@ class PtzAutoTracker:
while not self.move_queues[camera].empty(): while not self.move_queues[camera].empty():
self.move_queues[camera].get() self.move_queues[camera].get()
self.ptz_metrics[camera]["ptz_motor_stopped"].wait() self.ptz_metrics[camera]["ptz_stopped"].wait()
logger.debug( logger.debug(
f"{camera}: Time is {self.ptz_metrics[camera]['ptz_frame_time'].value}, returning to preset: {autotracker_config.return_preset}" f"{camera}: Time is {self.ptz_metrics[camera]['ptz_frame_time'].value}, returning to preset: {autotracker_config.return_preset}"
) )
@@ -1272,11 +1252,7 @@ class PtzAutoTracker:
) )
# update stored zoom level from preset # update stored zoom level from preset
if not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set(): if not self.ptz_metrics[camera]["ptz_stopped"].is_set():
self.onvif.get_camera_status(camera) self.onvif.get_camera_status(camera)
self.ptz_metrics[camera]["ptz_tracking_active"].clear()
self.dispatcher.publish(
f"{camera}/ptz_autotracker/active", "OFF", retain=False
)
self.ptz_metrics[camera]["ptz_reset"].set() self.ptz_metrics[camera]["ptz_reset"].set()

View File

@@ -133,7 +133,6 @@ class OnvifController:
# setup relative moving request for autotracking # setup relative moving request for autotracking
move_request = ptz.create_type("RelativeMove") move_request = ptz.create_type("RelativeMove")
move_request.ProfileToken = profile.token move_request.ProfileToken = profile.token
logger.debug(f"{camera_name}: Relative move request: {move_request}")
if move_request.Translation is None and fov_space_id is not None: if move_request.Translation is None and fov_space_id is not None:
move_request.Translation = status.Position move_request.Translation = status.Position
move_request.Translation.PanTilt.space = ptz_config["Spaces"][ move_request.Translation.PanTilt.space = ptz_config["Spaces"][
@@ -163,10 +162,7 @@ class OnvifController:
) )
if move_request.Speed is None: if move_request.Speed is None:
move_request.Speed = configs.DefaultPTZSpeed if configs else None move_request.Speed = status.Position if status else None
logger.debug(
f"{camera_name}: Relative move request after setup: {move_request}"
)
self.cams[camera_name]["relative_move_request"] = move_request self.cams[camera_name]["relative_move_request"] = move_request
# setup absolute moving request for autotracking zooming # setup absolute moving request for autotracking zooming
@@ -211,9 +207,7 @@ class OnvifController:
self.config.cameras[camera_name].onvif.autotracking.zooming self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative == ZoomingModeEnum.relative
): ):
self.config.cameras[ self.config.cameras[camera_name].onvif.autotracking.zooming = False
camera_name
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
logger.warning( logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported" f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported"
) )
@@ -228,9 +222,7 @@ class OnvifController:
self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits
except Exception: except Exception:
if self.config.cameras[camera_name].onvif.autotracking.zooming: if self.config.cameras[camera_name].onvif.autotracking.zooming:
self.config.cameras[ self.config.cameras[camera_name].onvif.autotracking.zooming = False
camera_name
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
logger.warning( logger.warning(
f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported" f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported"
) )
@@ -307,7 +299,7 @@ class OnvifController:
return return
self.cams[camera_name]["active"] = True self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear() self.ptz_metrics[camera_name]["ptz_stopped"].clear()
logger.debug( logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}" f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
) )
@@ -374,7 +366,7 @@ class OnvifController:
return return
self.cams[camera_name]["active"] = True self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear() self.ptz_metrics[camera_name]["ptz_stopped"].clear()
self.ptz_metrics[camera_name]["ptz_start_time"].value = 0 self.ptz_metrics[camera_name]["ptz_start_time"].value = 0
self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0 self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
move_request = self.cams[camera_name]["move_request"] move_request = self.cams[camera_name]["move_request"]
@@ -421,7 +413,7 @@ class OnvifController:
return return
self.cams[camera_name]["active"] = True self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear() self.ptz_metrics[camera_name]["ptz_stopped"].clear()
logger.debug( logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}" f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
) )
@@ -551,8 +543,8 @@ class OnvifController:
zoom_status is None or zoom_status.lower() == "idle" zoom_status is None or zoom_status.lower() == "idle"
): ):
self.cams[camera_name]["active"] = False self.cams[camera_name]["active"] = False
if not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set(): if not self.ptz_metrics[camera_name]["ptz_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set() self.ptz_metrics[camera_name]["ptz_stopped"].set()
logger.debug( logger.debug(
f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}" f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
@@ -563,8 +555,8 @@ class OnvifController:
]["ptz_frame_time"].value ]["ptz_frame_time"].value
else: else:
self.cams[camera_name]["active"] = True self.cams[camera_name]["active"] = True
if self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set(): if self.ptz_metrics[camera_name]["ptz_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear() self.ptz_metrics[camera_name]["ptz_stopped"].clear()
logger.debug( logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}" f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
@@ -594,7 +586,7 @@ class OnvifController:
# some hikvision cams won't update MoveStatus, so warn if it hasn't changed # some hikvision cams won't update MoveStatus, so warn if it hasn't changed
if ( if (
not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set() not self.ptz_metrics[camera_name]["ptz_stopped"].is_set()
and not self.ptz_metrics[camera_name]["ptz_reset"].is_set() and not self.ptz_metrics[camera_name]["ptz_reset"].is_set()
and self.ptz_metrics[camera_name]["ptz_start_time"].value != 0 and self.ptz_metrics[camera_name]["ptz_start_time"].value != 0
and self.ptz_metrics[camera_name]["ptz_frame_time"].value and self.ptz_metrics[camera_name]["ptz_frame_time"].value

View File

@@ -3,15 +3,17 @@
import datetime import datetime
import itertools import itertools
import logging import logging
import os
import threading import threading
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from peewee import DatabaseError, chunked
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, RECORD_DIR from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Recordings from frigate.models import Event, Recordings, RecordingsToDelete
from frigate.record.util import remove_empty_directories, sync_recordings from frigate.record.util import remove_empty_directories
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -31,7 +33,11 @@ class RecordingCleanup(threading.Thread):
logger.debug(f"Checking tmp clip {p}.") logger.debug(f"Checking tmp clip {p}.")
if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1): if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
logger.debug("Deleting tmp clip.") logger.debug("Deleting tmp clip.")
clear_and_unlink(p)
# empty contents of file before unlinking https://github.com/blakeblackshear/frigate/issues/4769
with open(p, "w"):
pass
p.unlink(missing_ok=True)
def expire_recordings(self) -> None: def expire_recordings(self) -> None:
"""Delete recordings based on retention config.""" """Delete recordings based on retention config."""
@@ -174,28 +180,76 @@ class RecordingCleanup(threading.Thread):
logger.debug("End all cameras.") logger.debug("End all cameras.")
logger.debug("End expire recordings.") logger.debug("End expire recordings.")
def sync_recordings(self) -> None:
"""Check the db for stale recordings entries that don't exist in the filesystem."""
logger.debug("Start sync recordings.")
# get all recordings in the db
recordings = Recordings.select(Recordings.id, Recordings.path)
# get all recordings files on disk and put them in a set
files_on_disk = {
os.path.join(root, file)
for root, _, files in os.walk(RECORD_DIR)
for file in files
}
# Use pagination to process records in chunks
page_size = 1000
num_pages = (recordings.count() + page_size - 1) // page_size
recordings_to_delete = set()
for page in range(num_pages):
for recording in recordings.paginate(page, page_size):
if recording.path not in files_on_disk:
recordings_to_delete.add(recording.id)
# convert back to list of dictionaries for insertion
recordings_to_delete = [
{"id": recording_id} for recording_id in recordings_to_delete
]
if len(recordings_to_delete) / max(1, recordings.count()) > 0.5:
logger.debug(
f"Deleting {(len(recordings_to_delete) / recordings.count()):2f}% of recordings could be due to configuration error. Aborting..."
)
return
logger.debug(
f"Deleting {len(recordings_to_delete)} recordings with missing files"
)
# create a temporary table for deletion
RecordingsToDelete.create_table(temporary=True)
# insert ids to the temporary table
max_inserts = 1000
for batch in chunked(recordings_to_delete, max_inserts):
RecordingsToDelete.insert_many(batch).execute()
try:
# delete records in the main table that exist in the temporary table
query = Recordings.delete().where(
Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id))
)
query.execute()
except DatabaseError as e:
logger.error(f"Database error during delete: {e}")
logger.debug("End sync recordings.")
def run(self) -> None: def run(self) -> None:
# on startup sync recordings with disk if enabled # on startup sync recordings with disk if enabled
if self.config.record.sync_recordings: if self.config.record.sync_on_startup:
sync_recordings(limited=False) self.sync_recordings()
next_sync = get_tomorrow_at_time(3)
# Expire tmp clips every minute, recordings and clean directories every hour. # Expire tmp clips every minute, recordings and clean directories every hour.
for counter in itertools.cycle(range(self.config.record.expire_interval)): for counter in itertools.cycle(range(self.config.record.expire_interval)):
if self.stop_event.wait(60): if self.stop_event.wait(60):
logger.info("Exiting recording cleanup...") logger.info("Exiting recording cleanup...")
break break
self.clean_tmp_clips() self.clean_tmp_clips()
if (
self.config.record.sync_recordings
and datetime.datetime.now().astimezone(datetime.timezone.utc)
> next_sync
):
sync_recordings(limited=True)
next_sync = get_tomorrow_at_time(3)
if counter == 0: if counter == 0:
self.expire_recordings() self.expire_recordings()
remove_empty_directories(RECORD_DIR) remove_empty_directories(RECORD_DIR)

View File

@@ -6,7 +6,6 @@ import os
import subprocess as sp import subprocess as sp
import threading import threading
from enum import Enum from enum import Enum
from pathlib import Path
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import EXPORT_DIR, MAX_PLAYLIST_SECONDS from frigate.const import EXPORT_DIR, MAX_PLAYLIST_SECONDS
@@ -122,7 +121,6 @@ class RecordingExporter(threading.Thread):
f"Failed to export recording for command {' '.join(ffmpeg_cmd)}" f"Failed to export recording for command {' '.join(ffmpeg_cmd)}"
) )
logger.error(p.stderr) logger.error(p.stderr)
Path(file_name).unlink(missing_ok=True)
return return
logger.debug(f"Updating finalized export {file_name}") logger.debug(f"Updating finalized export {file_name}")

View File

@@ -20,10 +20,8 @@ import psutil
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import ( from frigate.const import (
CACHE_DIR, CACHE_DIR,
CACHE_SEGMENT_FORMAT,
INSERT_MANY_RECORDINGS, INSERT_MANY_RECORDINGS,
MAX_SEGMENT_DURATION, MAX_SEGMENT_DURATION,
MAX_SEGMENTS_IN_CACHE,
RECORD_DIR, RECORD_DIR,
) )
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
@@ -33,8 +31,6 @@ from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
QUEUE_READ_TIMEOUT = 0.00001 # seconds
class SegmentInfo: class SegmentInfo:
def __init__( def __init__(
@@ -78,13 +74,15 @@ class RecordingMaintainer(threading.Thread):
self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {} self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
async def move_files(self) -> None: async def move_files(self) -> None:
cache_files = [ cache_files = sorted(
[
d d
for d in os.listdir(CACHE_DIR) for d in os.listdir(CACHE_DIR)
if os.path.isfile(os.path.join(CACHE_DIR, d)) if os.path.isfile(os.path.join(CACHE_DIR, d))
and d.endswith(".mp4") and d.endswith(".mp4")
and not d.startswith("clip_") and not d.startswith("clip_")
] ]
)
files_in_use = [] files_in_use = []
for process in psutil.process_iter(): for process in psutil.process_iter():
@@ -108,12 +106,8 @@ class RecordingMaintainer(threading.Thread):
cache_path = os.path.join(CACHE_DIR, cache) cache_path = os.path.join(CACHE_DIR, cache)
basename = os.path.splitext(cache)[0] basename = os.path.splitext(cache)[0]
camera, date = basename.rsplit("@", maxsplit=1) camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
# important that start_time is utc because recordings are stored and compared in utc
start_time = datetime.datetime.strptime(
date, CACHE_SEGMENT_FORMAT
).astimezone(datetime.timezone.utc)
grouped_recordings[camera].append( grouped_recordings[camera].append(
{ {
@@ -122,14 +116,9 @@ class RecordingMaintainer(threading.Thread):
} }
) )
# delete all cached files past the most recent MAX_SEGMENTS_IN_CACHE # delete all cached files past the most recent 5
keep_count = MAX_SEGMENTS_IN_CACHE keep_count = 5
for camera in grouped_recordings.keys(): for camera in grouped_recordings.keys():
# sort based on start time
grouped_recordings[camera] = sorted(
grouped_recordings[camera], key=lambda s: s["start_time"]
)
segment_count = len(grouped_recordings[camera]) segment_count = len(grouped_recordings[camera])
if segment_count > keep_count: if segment_count > keep_count:
logger.warning( logger.warning(
@@ -174,6 +163,8 @@ class RecordingMaintainer(threading.Thread):
Event.has_clip, Event.has_clip,
) )
.order_by(Event.start_time) .order_by(Event.start_time)
.namedtuples()
.iterator()
) )
tasks.extend( tasks.extend(
@@ -226,8 +217,12 @@ class RecordingMaintainer(threading.Thread):
# if cached file's start_time is earlier than the retain days for the camera # if cached file's start_time is earlier than the retain days for the camera
if start_time <= ( if start_time <= (
datetime.datetime.now().astimezone(datetime.timezone.utc) (
- datetime.timedelta(days=self.config.cameras[camera].record.retain.days) datetime.datetime.now()
- datetime.timedelta(
days=self.config.cameras[camera].record.retain.days
)
)
): ):
# if the cached segment overlaps with the events: # if the cached segment overlaps with the events:
overlaps = False overlaps = False
@@ -265,10 +260,8 @@ class RecordingMaintainer(threading.Thread):
most_recently_processed_frame_time = ( most_recently_processed_frame_time = (
camera_info[-1][0] if len(camera_info) > 0 else 0 camera_info[-1][0] if len(camera_info) > 0 else 0
) )
retain_cutoff = datetime.datetime.fromtimestamp( retain_cutoff = most_recently_processed_frame_time - pre_capture
most_recently_processed_frame_time - pre_capture if end_time.timestamp() < retain_cutoff:
).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff:
Path(cache_path).unlink(missing_ok=True) Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None) self.end_time_cache.pop(cache_path, None)
# else retain days includes this segment # else retain days includes this segment
@@ -280,12 +273,7 @@ class RecordingMaintainer(threading.Thread):
) )
# ensure delayed segment info does not lead to lost segments # ensure delayed segment info does not lead to lost segments
if ( if most_recently_processed_frame_time >= end_time.timestamp():
datetime.datetime.fromtimestamp(
most_recently_processed_frame_time
).astimezone(datetime.timezone.utc)
>= end_time
):
record_mode = self.config.cameras[camera].record.retain.mode record_mode = self.config.cameras[camera].record.retain.mode
return await self.move_segment( return await self.move_segment(
camera, start_time, end_time, duration, cache_path, record_mode camera, start_time, end_time, duration, cache_path, record_mode
@@ -351,18 +339,18 @@ class RecordingMaintainer(threading.Thread):
self.end_time_cache.pop(cache_path, None) self.end_time_cache.pop(cache_path, None)
return return
# directory will be in utc due to start_time being in utc
directory = os.path.join( directory = os.path.join(
RECORD_DIR, RECORD_DIR,
start_time.strftime("%Y-%m-%d/%H"), start_time.astimezone(tz=datetime.timezone.utc).strftime("%Y-%m-%d/%H"),
camera, camera,
) )
if not os.path.exists(directory): if not os.path.exists(directory):
os.makedirs(directory) os.makedirs(directory)
# file will be in utc due to start_time being in utc file_name = (
file_name = f"{start_time.strftime('%M.%S.mp4')}" f"{start_time.replace(tzinfo=datetime.timezone.utc).strftime('%M.%S.mp4')}"
)
file_path = os.path.join(directory, file_name) file_path = os.path.join(directory, file_name)
try: try:
@@ -449,9 +437,7 @@ class RecordingMaintainer(threading.Thread):
current_tracked_objects, current_tracked_objects,
motion_boxes, motion_boxes,
regions, regions,
) = self.object_recordings_info_queue.get( ) = self.object_recordings_info_queue.get(True, timeout=0.01)
True, timeout=QUEUE_READ_TIMEOUT
)
if frame_time < run_start - stale_frame_count_threshold: if frame_time < run_start - stale_frame_count_threshold:
stale_frame_count += 1 stale_frame_count += 1
@@ -487,9 +473,7 @@ class RecordingMaintainer(threading.Thread):
frame_time, frame_time,
dBFS, dBFS,
audio_detections, audio_detections,
) = self.audio_recordings_info_queue.get( ) = self.audio_recordings_info_queue.get(True, timeout=0.01)
True, timeout=QUEUE_READ_TIMEOUT
)
if frame_time < run_start - stale_frame_count_threshold: if frame_time < run_start - stale_frame_count_threshold:
stale_frame_count += 1 stale_frame_count += 1

View File

@@ -1,16 +1,7 @@
"""Recordings Utilities.""" """Recordings Utilities."""
import datetime
import logging
import os import os
from peewee import DatabaseError, chunked
from frigate.const import RECORD_DIR
from frigate.models import Recordings, RecordingsToDelete
logger = logging.getLogger(__name__)
def remove_empty_directories(directory: str) -> None: def remove_empty_directories(directory: str) -> None:
# list all directories recursively and sort them by path, # list all directories recursively and sort them by path,
@@ -26,122 +17,3 @@ def remove_empty_directories(directory: str) -> None:
continue continue
if len(os.listdir(path)) == 0: if len(os.listdir(path)) == 0:
os.rmdir(path) os.rmdir(path)
def sync_recordings(limited: bool) -> None:
"""Check the db for stale recordings entries that don't exist in the filesystem."""
def delete_db_entries_without_file(check_timestamp: float) -> bool:
"""Delete db entries where file was deleted outside of frigate."""
if limited:
recordings = Recordings.select(Recordings.id, Recordings.path).where(
Recordings.start_time >= check_timestamp
)
else:
# get all recordings in the db
recordings = Recordings.select(Recordings.id, Recordings.path)
# Use pagination to process records in chunks
page_size = 1000
num_pages = (recordings.count() + page_size - 1) // page_size
recordings_to_delete = set()
for page in range(num_pages):
for recording in recordings.paginate(page, page_size):
if not os.path.exists(recording.path):
recordings_to_delete.add(recording.id)
if len(recordings_to_delete) == 0:
return True
logger.info(
f"Deleting {len(recordings_to_delete)} recording DB entries with missing files"
)
# convert back to list of dictionaries for insertion
recordings_to_delete = [
{"id": recording_id} for recording_id in recordings_to_delete
]
if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5:
logger.debug(
f"Deleting {(float(len(recordings_to_delete)) / recordings.count()):2f}% of recordings DB entries, could be due to configuration error. Aborting..."
)
return False
# create a temporary table for deletion
RecordingsToDelete.create_table(temporary=True)
# insert ids to the temporary table
max_inserts = 1000
for batch in chunked(recordings_to_delete, max_inserts):
RecordingsToDelete.insert_many(batch).execute()
try:
# delete records in the main table that exist in the temporary table
query = Recordings.delete().where(
Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id))
)
query.execute()
except DatabaseError as e:
logger.error(f"Database error during recordings db cleanup: {e}")
return True
def delete_files_without_db_entry(files_on_disk: list[str]):
"""Delete files where file is not inside frigate db."""
files_to_delete = []
for file in files_on_disk:
if not Recordings.select().where(Recordings.path == file).exists():
files_to_delete.append(file)
if len(files_to_delete) == 0:
return True
logger.info(
f"Deleting {len(files_to_delete)} recordings files with missing DB entries"
)
if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5:
logger.debug(
f"Deleting {(float(len(files_to_delete)) / len(files_on_disk)):2f}% of recordings DB entries, could be due to configuration error. Aborting..."
)
return False
for file in files_to_delete:
os.unlink(file)
return True
logger.debug("Start sync recordings.")
# start checking on the hour 36 hours ago
check_point = datetime.datetime.now().replace(
minute=0, second=0, microsecond=0
).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36)
db_success = delete_db_entries_without_file(check_point.timestamp())
# only try to cleanup files if db cleanup was successful
if db_success:
if limited:
# get recording files from last 36 hours
hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}"
files_on_disk = {
os.path.join(root, file)
for root, _, files in os.walk(RECORD_DIR)
for file in files
if root > hour_check
}
else:
# get all recordings files on disk and put them in a set
files_on_disk = {
os.path.join(root, file)
for root, _, files in os.walk(RECORD_DIR)
for file in files
}
delete_files_without_db_entry(files_on_disk)
logger.debug("End sync recordings.")

View File

@@ -10,7 +10,6 @@ from peewee import fn
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.util.builtin import clear_and_unlink
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
bandwidth_equation = Recordings.segment_size / ( bandwidth_equation = Recordings.segment_size / (
@@ -36,7 +35,7 @@ class StorageMaintainer(threading.Thread):
if self.camera_storage_stats.get(camera, {}).get("needs_refresh", True): if self.camera_storage_stats.get(camera, {}).get("needs_refresh", True):
self.camera_storage_stats[camera] = { self.camera_storage_stats[camera] = {
"needs_refresh": ( "needs_refresh": (
Recordings.select(fn.COUNT("*")) Recordings.select(fn.COUNT(Recordings.id))
.where(Recordings.camera == camera, Recordings.segment_size > 0) .where(Recordings.camera == camera, Recordings.segment_size > 0)
.scalar() .scalar()
< 50 < 50
@@ -160,13 +159,9 @@ class StorageMaintainer(threading.Thread):
# Delete recordings not retained indefinitely # Delete recordings not retained indefinitely
if not keep: if not keep:
try:
clear_and_unlink(Path(recording.path), missing_ok=False)
deleted_recordings.add(recording.id)
deleted_segments_size += recording.segment_size deleted_segments_size += recording.segment_size
except FileNotFoundError: Path(recording.path).unlink(missing_ok=True)
# this file was not found so we must assume no space was cleaned up deleted_recordings.add(recording.id)
pass
# check if need to delete retained segments # check if need to delete retained segments
if deleted_segments_size < hourly_bandwidth: if deleted_segments_size < hourly_bandwidth:
@@ -188,15 +183,9 @@ class StorageMaintainer(threading.Thread):
if deleted_segments_size > hourly_bandwidth: if deleted_segments_size > hourly_bandwidth:
break break
try:
clear_and_unlink(Path(recording.path), missing_ok=False)
deleted_segments_size += recording.segment_size deleted_segments_size += recording.segment_size
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id) deleted_recordings.add(recording.id)
except FileNotFoundError:
# this file was not found so we must assume no space was cleaned up
pass
else:
logger.info(f"Cleaned up {deleted_segments_size} MB of recordings")
logger.debug(f"Expiring {len(deleted_recordings)} recordings") logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time # delete up to 100,000 at a time

View File

@@ -1651,11 +1651,11 @@ class TestConfig(unittest.TestCase):
runtime_config = frigate_config.runtime_config() runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].onvif.autotracking.movement_weights == [ assert runtime_config.cameras["back"].onvif.autotracking.movement_weights == [
"0.0", 0,
"1.0", 1,
"1.23", 1.23,
"2.34", 2.34,
"0.5", 0.50,
] ]
def test_fails_invalid_movement_weights(self): def test_fails_invalid_movement_weights(self):

View File

@@ -1,7 +1,6 @@
import datetime import datetime
import logging import logging
import os import os
import tempfile
import unittest import unittest
from unittest.mock import MagicMock from unittest.mock import MagicMock
@@ -27,7 +26,6 @@ class TestHttp(unittest.TestCase):
self.db = SqliteQueueDatabase(TEST_DB) self.db = SqliteQueueDatabase(TEST_DB)
models = [Event, Recordings] models = [Event, Recordings]
self.db.bind(models) self.db.bind(models)
self.test_dir = tempfile.mkdtemp()
self.minimal_config = { self.minimal_config = {
"mqtt": {"host": "mqtt"}, "mqtt": {"host": "mqtt"},
@@ -96,7 +94,6 @@ class TestHttp(unittest.TestCase):
rec_bd_id = "1234568.backdoor" rec_bd_id = "1234568.backdoor"
_insert_mock_recording( _insert_mock_recording(
rec_fd_id, rec_fd_id,
os.path.join(self.test_dir, f"{rec_fd_id}.tmp"),
time_keep, time_keep,
time_keep + 10, time_keep + 10,
camera="front_door", camera="front_door",
@@ -105,7 +102,6 @@ class TestHttp(unittest.TestCase):
) )
_insert_mock_recording( _insert_mock_recording(
rec_bd_id, rec_bd_id,
os.path.join(self.test_dir, f"{rec_bd_id}.tmp"),
time_keep + 10, time_keep + 10,
time_keep + 20, time_keep + 20,
camera="back_door", camera="back_door",
@@ -127,7 +123,6 @@ class TestHttp(unittest.TestCase):
rec_fd_id = "1234567.frontdoor" rec_fd_id = "1234567.frontdoor"
_insert_mock_recording( _insert_mock_recording(
rec_fd_id, rec_fd_id,
os.path.join(self.test_dir, f"{rec_fd_id}.tmp"),
time_keep, time_keep,
time_keep + 10, time_keep + 10,
camera="front_door", camera="front_door",
@@ -146,33 +141,13 @@ class TestHttp(unittest.TestCase):
id = "123456.keep" id = "123456.keep"
time_keep = datetime.datetime.now().timestamp() time_keep = datetime.datetime.now().timestamp()
_insert_mock_event( _insert_mock_event(id, time_keep, time_keep + 30, True)
id,
time_keep,
time_keep + 30,
True,
)
rec_k_id = "1234567.keep" rec_k_id = "1234567.keep"
rec_k2_id = "1234568.keep" rec_k2_id = "1234568.keep"
rec_k3_id = "1234569.keep" rec_k3_id = "1234569.keep"
_insert_mock_recording( _insert_mock_recording(rec_k_id, time_keep, time_keep + 10)
rec_k_id, _insert_mock_recording(rec_k2_id, time_keep + 10, time_keep + 20)
os.path.join(self.test_dir, f"{rec_k_id}.tmp"), _insert_mock_recording(rec_k3_id, time_keep + 20, time_keep + 30)
time_keep,
time_keep + 10,
)
_insert_mock_recording(
rec_k2_id,
os.path.join(self.test_dir, f"{rec_k2_id}.tmp"),
time_keep + 10,
time_keep + 20,
)
_insert_mock_recording(
rec_k3_id,
os.path.join(self.test_dir, f"{rec_k3_id}.tmp"),
time_keep + 20,
time_keep + 30,
)
id2 = "7890.delete" id2 = "7890.delete"
time_delete = datetime.datetime.now().timestamp() - 360 time_delete = datetime.datetime.now().timestamp() - 360
@@ -180,24 +155,9 @@ class TestHttp(unittest.TestCase):
rec_d_id = "78901.delete" rec_d_id = "78901.delete"
rec_d2_id = "78902.delete" rec_d2_id = "78902.delete"
rec_d3_id = "78903.delete" rec_d3_id = "78903.delete"
_insert_mock_recording( _insert_mock_recording(rec_d_id, time_delete, time_delete + 10)
rec_d_id, _insert_mock_recording(rec_d2_id, time_delete + 10, time_delete + 20)
os.path.join(self.test_dir, f"{rec_d_id}.tmp"), _insert_mock_recording(rec_d3_id, time_delete + 20, time_delete + 30)
time_delete,
time_delete + 10,
)
_insert_mock_recording(
rec_d2_id,
os.path.join(self.test_dir, f"{rec_d2_id}.tmp"),
time_delete + 10,
time_delete + 20,
)
_insert_mock_recording(
rec_d3_id,
os.path.join(self.test_dir, f"{rec_d3_id}.tmp"),
time_delete + 20,
time_delete + 30,
)
storage.calculate_camera_bandwidth() storage.calculate_camera_bandwidth()
storage.reduce_storage_consumption() storage.reduce_storage_consumption()
@@ -216,42 +176,18 @@ class TestHttp(unittest.TestCase):
id = "123456.keep" id = "123456.keep"
time_keep = datetime.datetime.now().timestamp() time_keep = datetime.datetime.now().timestamp()
_insert_mock_event( _insert_mock_event(id, time_keep, time_keep + 30, True)
id,
time_keep,
time_keep + 30,
True,
)
rec_k_id = "1234567.keep" rec_k_id = "1234567.keep"
rec_k2_id = "1234568.keep" rec_k2_id = "1234568.keep"
rec_k3_id = "1234569.keep" rec_k3_id = "1234569.keep"
_insert_mock_recording( _insert_mock_recording(rec_k_id, time_keep, time_keep + 10)
rec_k_id, _insert_mock_recording(rec_k2_id, time_keep + 10, time_keep + 20)
os.path.join(self.test_dir, f"{rec_k_id}.tmp"), _insert_mock_recording(rec_k3_id, time_keep + 20, time_keep + 30)
time_keep,
time_keep + 10,
)
_insert_mock_recording(
rec_k2_id,
os.path.join(self.test_dir, f"{rec_k2_id}.tmp"),
time_keep + 10,
time_keep + 20,
)
_insert_mock_recording(
rec_k3_id,
os.path.join(self.test_dir, f"{rec_k3_id}.tmp"),
time_keep + 20,
time_keep + 30,
)
time_delete = datetime.datetime.now().timestamp() - 7200 time_delete = datetime.datetime.now().timestamp() - 7200
for i in range(0, 59): for i in range(0, 59):
id = f"{123456 + i}.delete"
_insert_mock_recording( _insert_mock_recording(
id, f"{123456 + i}.delete", time_delete, time_delete + 600
os.path.join(self.test_dir, f"{id}.tmp"),
time_delete,
time_delete + 600,
) )
storage.calculate_camera_bandwidth() storage.calculate_camera_bandwidth()
@@ -283,23 +219,13 @@ def _insert_mock_event(id: str, start: int, end: int, retain: bool) -> Event:
def _insert_mock_recording( def _insert_mock_recording(
id: str, id: str, start: int, end: int, camera="front_door", seg_size=8, seg_dur=10
file: str,
start: int,
end: int,
camera="front_door",
seg_size=8,
seg_dur=10,
) -> Event: ) -> Event:
"""Inserts a basic recording model with a given id.""" """Inserts a basic recording model with a given id."""
# we must open the file so storage maintainer will delete it
with open(file, "w"):
pass
return Recordings.insert( return Recordings.insert(
id=id, id=id,
camera=camera, camera=camera,
path=file, path=f"/recordings/{id}",
start_time=start, start_time=start,
end_time=end, end_time=end,
duration=seg_dur, duration=seg_dur,

View File

@@ -5,7 +5,7 @@ import numpy as np
from norfair.drawing.color import Palette from norfair.drawing.color import Palette
from norfair.drawing.drawer import Drawer from norfair.drawing.drawer import Drawer
from frigate.util.image import intersection, transliterate_to_latin from frigate.util.image import intersection
from frigate.util.object import ( from frigate.util.object import (
get_cluster_boundary, get_cluster_boundary,
get_cluster_candidates, get_cluster_candidates,
@@ -82,11 +82,6 @@ class TestRegion(unittest.TestCase):
assert len(cluster_candidates) == 2 assert len(cluster_candidates) == 2
def test_transliterate_to_latin(self):
self.assertEqual(transliterate_to_latin("frégate"), "fregate")
self.assertEqual(transliterate_to_latin("utilité"), "utilite")
self.assertEqual(transliterate_to_latin("imágé"), "image")
def test_cluster_boundary(self): def test_cluster_boundary(self):
boxes = [(100, 100, 200, 200), (215, 215, 325, 325)] boxes = [(100, 100, 200, 200), (215, 215, 325, 325)]
boundary_boxes = [ boundary_boxes = [

View File

@@ -102,11 +102,5 @@ class TimelineProcessor(threading.Thread):
)[0] )[0]
Timeline.insert(timeline_entry).execute() Timeline.insert(timeline_entry).execute()
elif event_type == "end": elif event_type == "end":
if event_data["has_clip"] or event_data["has_snapshot"]:
timeline_entry[Timeline.class_type] = "gone" timeline_entry[Timeline.class_type] = "gone"
Timeline.insert(timeline_entry).execute() Timeline.insert(timeline_entry).execute()
else:
# if event was not saved then the timeline entries should be deleted
Timeline.delete().where(
Timeline.source_id == event_data["id"]
).execute()

View File

@@ -68,6 +68,7 @@ class NorfairTracker(ObjectTracker):
self.untracked_object_boxes: list[list[int]] = [] self.untracked_object_boxes: list[list[int]] = []
self.disappeared = {} self.disappeared = {}
self.positions = {} self.positions = {}
self.max_disappeared = config.detect.max_disappeared
self.camera_config = config self.camera_config = config
self.detect_config = config.detect self.detect_config = config.detect
self.ptz_metrics = ptz_metrics self.ptz_metrics = ptz_metrics
@@ -80,8 +81,8 @@ class NorfairTracker(ObjectTracker):
self.tracker = Tracker( self.tracker = Tracker(
distance_function=frigate_distance, distance_function=frigate_distance,
distance_threshold=2.5, distance_threshold=2.5,
initialization_delay=self.detect_config.min_initialized, initialization_delay=self.detect_config.fps / 2,
hit_counter_max=self.detect_config.max_disappeared, hit_counter_max=self.max_disappeared,
) )
if self.ptz_autotracker_enabled.value: if self.ptz_autotracker_enabled.value:
self.ptz_motion_estimator = PtzMotionEstimator( self.ptz_motion_estimator = PtzMotionEstimator(

View File

@@ -31,8 +31,7 @@ class CameraMetricsTypes(TypedDict):
class PTZMetricsTypes(TypedDict): class PTZMetricsTypes(TypedDict):
ptz_autotracker_enabled: Synchronized ptz_autotracker_enabled: Synchronized
ptz_tracking_active: Event ptz_stopped: Event
ptz_motor_stopped: Event
ptz_reset: Event ptz_reset: Event
ptz_start_time: Synchronized ptz_start_time: Synchronized
ptz_stop_time: Synchronized ptz_stop_time: Synchronized

View File

@@ -8,7 +8,6 @@ import shlex
import urllib.parse import urllib.parse
from collections import Counter from collections import Counter
from collections.abc import Mapping from collections.abc import Mapping
from pathlib import Path
from typing import Any, Tuple from typing import Any, Tuple
import numpy as np import numpy as np
@@ -16,7 +15,6 @@ import pytz
import yaml import yaml
from ruamel.yaml import YAML from ruamel.yaml import YAML
from tzlocal import get_localzone from tzlocal import get_localzone
from zoneinfo import ZoneInfoNotFoundError
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
@@ -116,8 +114,10 @@ def load_config_with_no_duplicates(raw_config) -> dict:
def clean_camera_user_pass(line: str) -> str: def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line.""" """Removes user and password from line."""
rtsp_cleaned = re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line) if "rtsp://" in line:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", rtsp_cleaned) return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
def escape_special_characters(path: str) -> str: def escape_special_characters(path: str) -> str:
@@ -158,7 +158,7 @@ def load_labels(path, encoding="utf-8", prefill=91):
return labels return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str, int]: def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
seconds_offset = ( seconds_offset = (
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds() datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
) )
@@ -166,7 +166,7 @@ def get_tz_modifiers(tz_name: str) -> Tuple[str, str, int]:
minutes_offset = int(seconds_offset / 60 - hours_offset * 60) minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
hour_modifier = f"{hours_offset} hour" hour_modifier = f"{hours_offset} hour"
minute_modifier = f"{minutes_offset} minute" minute_modifier = f"{minutes_offset} minute"
return hour_modifier, minute_modifier, seconds_offset return hour_modifier, minute_modifier
def to_relative_box( def to_relative_box(
@@ -265,30 +265,8 @@ def find_by_key(dictionary, target_key):
return None return None
def get_tomorrow_at_time(hour: int) -> datetime.datetime: def get_tomorrow_at_2() -> datetime.datetime:
"""Returns the datetime of the following day at 2am."""
try:
tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1) tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
except ZoneInfoNotFoundError: return tomorrow.replace(hour=2, minute=0, second=0).astimezone(
tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
days=1
)
logger.warning(
"Using utc for maintenance due to missing or incorrect timezone set"
)
return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
datetime.timezone.utc datetime.timezone.utc
) )
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
"""clear file then unlink to avoid space retained by file descriptors."""
if not missing_ok and not file.exists():
raise FileNotFoundError()
# empty contents of file before unlinking https://github.com/blakeblackshear/frigate/issues/4769
with open(file, "w"):
pass
file.unlink(missing_ok=missing_ok)

View File

@@ -9,32 +9,10 @@ from typing import AnyStr, Optional
import cv2 import cv2
import numpy as np import numpy as np
from unidecode import unidecode
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def transliterate_to_latin(text: str) -> str:
"""
Transliterate a given text to Latin.
This function uses the unidecode library to transliterate the input text to Latin.
It is useful for converting texts with diacritics or non-Latin characters to a
Latin equivalent.
Args:
text (str): The text to be transliterated.
Returns:
str: The transliterated text.
Example:
>>> transliterate_to_latin('frégate')
'fregate'
"""
return unidecode(text)
def draw_timestamp( def draw_timestamp(
frame, frame,
timestamp, timestamp,
@@ -138,9 +116,6 @@ def draw_box_with_label(
): ):
if color is None: if color is None:
color = (0, 0, 255) color = (0, 0, 255)
try:
display_text = transliterate_to_latin("{}: {}".format(label, info))
except Exception:
display_text = "{}: {}".format(label, info) display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness) cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
font_scale = 0.5 font_scale = 0.5
@@ -312,14 +287,17 @@ def yuv_crop_and_resize(frame, region, height=None):
# copy u2 # copy u2
yuv_cropped_frame[ yuv_cropped_frame[
size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height, size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height,
size // 2 + uv_channel_x_offset : size // 2 size // 2
+ uv_channel_x_offset : size // 2
+ uv_channel_x_offset + uv_channel_x_offset
+ uv_crop_width, + uv_crop_width,
] = frame[u2[1] : u2[3], u2[0] : u2[2]] ] = frame[u2[1] : u2[3], u2[0] : u2[2]]
# copy v1 # copy v1
yuv_cropped_frame[ yuv_cropped_frame[
size + size // 4 + uv_channel_y_offset : size size
+ size // 4
+ uv_channel_y_offset : size
+ size // 4 + size // 4
+ uv_channel_y_offset + uv_channel_y_offset
+ uv_crop_height, + uv_crop_height,
@@ -328,11 +306,14 @@ def yuv_crop_and_resize(frame, region, height=None):
# copy v2 # copy v2
yuv_cropped_frame[ yuv_cropped_frame[
size + size // 4 + uv_channel_y_offset : size size
+ size // 4
+ uv_channel_y_offset : size
+ size // 4 + size // 4
+ uv_channel_y_offset + uv_channel_y_offset
+ uv_crop_height, + uv_crop_height,
size // 2 + uv_channel_x_offset : size // 2 size // 2
+ uv_channel_x_offset : size // 2
+ uv_channel_x_offset + uv_channel_x_offset
+ uv_crop_width, + uv_crop_width,
] = frame[v2[1] : v2[3], v2[0] : v2[2]] ] = frame[v2[1] : v2[3], v2[0] : v2[2]]

View File

@@ -30,9 +30,7 @@ GRID_SIZE = 8
def get_camera_regions_grid( def get_camera_regions_grid(
name: str, name: str, detect: DetectConfig
detect: DetectConfig,
min_region_size: int,
) -> list[list[dict[str, any]]]: ) -> list[list[dict[str, any]]]:
"""Build a grid of expected region sizes for a camera.""" """Build a grid of expected region sizes for a camera."""
# get grid from db if available # get grid from db if available
@@ -101,7 +99,7 @@ def get_camera_regions_grid(
box[1] * height, box[1] * height,
(box[0] + box[2]) * width, (box[0] + box[2]) * width,
(box[1] + box[3]) * height, (box[1] + box[3]) * height,
min_region_size, 320,
1.35, 1.35,
) )
# save width of region to grid as relative # save width of region to grid as relative
@@ -176,9 +174,9 @@ def get_region_from_grid(
cell = region_grid[grid_x][grid_y] cell = region_grid[grid_x][grid_y]
# if there is no known data, use original region calculation # if there is no known data, get standard region for motion box
if not cell or not cell["sizes"]: if not cell or not cell["sizes"]:
return box return calculate_region(frame_shape, box[0], box[1], box[2], box[3], min_region)
# convert the calculated region size to relative # convert the calculated region size to relative
calc_size = (box[2] - box[0]) / frame_shape[1] calc_size = (box[2] - box[0]) / frame_shape[1]

View File

@@ -371,7 +371,7 @@ def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
return sp.run(ffprobe_cmd, capture_output=True) return sp.run(ffprobe_cmd, capture_output=True)
async def get_video_properties(url, get_duration=False) -> dict[str, any]: async def get_video_properties(url, get_duration=False):
async def calculate_duration(video: Optional[any]) -> float: async def calculate_duration(video: Optional[any]) -> float:
duration = None duration = None
@@ -405,10 +405,7 @@ async def get_video_properties(url, get_duration=False) -> dict[str, any]:
result = None result = None
if result: if result:
try:
duration = float(result.strip()) duration = float(result.strip())
except ValueError:
duration = -1
else: else:
duration = -1 duration = -1

View File

@@ -16,7 +16,6 @@ from frigate.const import (
ALL_ATTRIBUTE_LABELS, ALL_ATTRIBUTE_LABELS,
ATTRIBUTE_LABEL_MAP, ATTRIBUTE_LABEL_MAP,
CACHE_DIR, CACHE_DIR,
CACHE_SEGMENT_FORMAT,
REQUEST_REGION_GRID, REQUEST_REGION_GRID,
) )
from frigate.log import LogPipe from frigate.log import LogPipe
@@ -27,7 +26,7 @@ from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker from frigate.track.norfair_tracker import NorfairTracker
from frigate.types import PTZMetricsTypes from frigate.types import PTZMetricsTypes
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_2
from frigate.util.image import ( from frigate.util.image import (
FrameManager, FrameManager,
SharedMemoryFrameManager, SharedMemoryFrameManager,
@@ -234,15 +233,14 @@ class CameraWatchdog(threading.Thread):
poll = p["process"].poll() poll = p["process"].poll()
if self.config.record.enabled and "record" in p["roles"]: if self.config.record.enabled and "record" in p["roles"]:
latest_segment_time = self.get_latest_segment_datetime( latest_segment_time = self.get_latest_segment_timestamp(
p.get( p.get(
"latest_segment_time", "latest_segment_time", datetime.datetime.now().timestamp()
datetime.datetime.now().astimezone(datetime.timezone.utc),
) )
) )
if datetime.datetime.now().astimezone(datetime.timezone.utc) > ( if datetime.datetime.now().timestamp() > (
latest_segment_time + datetime.timedelta(seconds=120) latest_segment_time + 120
): ):
self.logger.error( self.logger.error(
f"No new recording segments were created for {self.camera_name} in the last 120s. restarting the ffmpeg record process..." f"No new recording segments were created for {self.camera_name} in the last 120s. restarting the ffmpeg record process..."
@@ -290,7 +288,7 @@ class CameraWatchdog(threading.Thread):
) )
self.capture_thread.start() self.capture_thread.start()
def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int: def get_latest_segment_timestamp(self, latest_timestamp) -> int:
"""Checks if ffmpeg is still writing recording segments to cache.""" """Checks if ffmpeg is still writing recording segments to cache."""
cache_files = sorted( cache_files = sorted(
[ [
@@ -301,19 +299,17 @@ class CameraWatchdog(threading.Thread):
and not d.startswith("clip_") and not d.startswith("clip_")
] ]
) )
newest_segment_time = latest_segment newest_segment_timestamp = latest_timestamp
for file in cache_files: for file in cache_files:
if self.camera_name in file: if self.camera_name in file:
basename = os.path.splitext(file)[0] basename = os.path.splitext(file)[0]
_, date = basename.rsplit("@", maxsplit=1) _, date = basename.rsplit("-", maxsplit=1)
segment_time = datetime.datetime.strptime( ts = datetime.datetime.strptime(date, "%Y%m%d%H%M%S").timestamp()
date, CACHE_SEGMENT_FORMAT if ts > newest_segment_timestamp:
).astimezone(datetime.timezone.utc) newest_segment_timestamp = ts
if segment_time > newest_segment_time:
newest_segment_time = segment_time
return newest_segment_time return newest_segment_timestamp
class CameraCapture(threading.Thread): class CameraCapture(threading.Thread):
@@ -529,7 +525,7 @@ def process_frames(
fps = process_info["process_fps"] fps = process_info["process_fps"]
detection_fps = process_info["detection_fps"] detection_fps = process_info["detection_fps"]
current_frame_time = process_info["detection_frame"] current_frame_time = process_info["detection_frame"]
next_region_update = get_tomorrow_at_time(2) next_region_update = get_tomorrow_at_2()
fps_tracker = EventsPerSecond() fps_tracker = EventsPerSecond()
fps_tracker.start() fps_tracker.start()
@@ -551,7 +547,7 @@ def process_frames(
except queue.Empty: except queue.Empty:
logger.error(f"Unable to get updated region grid for {camera_name}") logger.error(f"Unable to get updated region grid for {camera_name}")
next_region_update = get_tomorrow_at_time(2) next_region_update = get_tomorrow_at_2()
try: try:
if exit_on_empty: if exit_on_empty:

View File

@@ -1,40 +0,0 @@
"""Peewee migrations -- 020_update_index_recordings.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql("DROP INDEX recordings_end_time_start_time")
migrator.sql(
'CREATE INDEX "recordings_camera_start_time_end_time" ON "recordings" ("camera", "start_time" DESC, "end_time" DESC)'
)
migrator.sql(
'CREATE INDEX "recordings_api_recordings_summary" ON "recordings" ("camera", "start_time" DESC, "duration", "motion", "objects")'
)
migrator.sql('CREATE INDEX "recordings_start_time" ON "recordings" ("start_time")')
def rollback(migrator, database, fake=False, **kwargs):
pass

View File

@@ -1,7 +0,0 @@
[build]
base = "docs/"
publish = "build"
command = "npm run build"
environment = { NODE_VERSION = "20" }

View File

@@ -1,3 +1,5 @@
[tool.isort]
profile = "black"
[tool.ruff] [tool.ruff]
ignore = ["E501","E711","E712"] ignore = ["E501","E711","E712"]
extend-select = ["I"]

571
web/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -55,7 +55,7 @@
"postcss": "^8.4.29", "postcss": "^8.4.29",
"prettier": "^3.0.3", "prettier": "^3.0.3",
"tailwindcss": "^3.3.2", "tailwindcss": "^3.3.2",
"typescript": "^5.2.2", "typescript": "^5.0.4",
"vite": "^4.4.9", "vite": "^4.4.9",
"vitest": "^0.34.3" "vitest": "^0.34.3"
} }

View File

@@ -40,7 +40,7 @@ export default function AppBar() {
setShowDialog(false); setShowDialog(false);
setShowDialogWait(true); setShowDialogWait(true);
sendRestart(); sendRestart();
}, [setShowDialog, sendRestart]); }, [setShowDialog]); // eslint-disable-line react-hooks/exhaustive-deps
const handleDismissRestartDialog = useCallback(() => { const handleDismissRestartDialog = useCallback(() => {
setShowDialog(false); setShowDialog(false);

View File

@@ -7,7 +7,6 @@ import axios from 'axios';
axios.defaults.baseURL = `${baseUrl}api/`; axios.defaults.baseURL = `${baseUrl}api/`;
axios.defaults.headers.common = { axios.defaults.headers.common = {
'X-CSRF-TOKEN': 1, 'X-CSRF-TOKEN': 1,
'X-CACHE-BYPASS': 1,
}; };
export function ApiProvider({ children, options }) { export function ApiProvider({ children, options }) {

View File

@@ -67,7 +67,6 @@ export default function Button({
disabled = false, disabled = false,
ariaCapitalize = false, ariaCapitalize = false,
href, href,
target,
type = 'contained', type = 'contained',
...attrs ...attrs
}) { }) {
@@ -102,7 +101,6 @@ export default function Button({
tabindex="0" tabindex="0"
className={classes} className={classes}
href={href} href={href}
target={target}
ref={ref} ref={ref}
onmouseenter={handleMousenter} onmouseenter={handleMousenter}
onmouseleave={handleMouseleave} onmouseleave={handleMouseleave}

View File

@@ -34,18 +34,10 @@ export default function TimelineSummary({ event, onFrameSelected }) {
const [timeIndex, setTimeIndex] = useState(-1); const [timeIndex, setTimeIndex] = useState(-1);
const recordingParams = useMemo(() => { const recordingParams = {
if (!event.end_time) { before: event.end_time || Date.now(),
return {
after: event.start_time, after: event.start_time,
}; };
}
return {
before: event.end_time,
after: event.start_time,
};
}, [event]);
const { data: recordings } = useSWR([`${event.camera}/recordings`, recordingParams], { revalidateOnFocus: false }); const { data: recordings } = useSWR([`${event.camera}/recordings`, recordingParams], { revalidateOnFocus: false });
// calculates the seek seconds by adding up all the seconds in the segments prior to the playback time // calculates the seek seconds by adding up all the seconds in the segments prior to the playback time

View File

@@ -1,19 +0,0 @@
import { h } from 'preact';
import { memo } from 'preact/compat';
export function Submitted({ className = 'h-6 w-6', inner_fill = 'none', outer_stroke = 'currentColor', onClick = () => {} }) {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
className={className}
viewBox="0 0 32 32"
onClick={onClick}
>
<rect x="10" y="15" fill={inner_fill} width="12" height="2"/>
<rect x="15" y="10" fill={inner_fill} width="2" height="12"/>
<circle fill="none" stroke={outer_stroke} stroke-width="2" stroke-miterlimit="10" cx="16" cy="16" r="12"/>
</svg>
);
}
export default memo(Submitted);

View File

@@ -1,21 +0,0 @@
import { h } from 'preact';
import { memo } from 'preact/compat';
export function WebUI({ className = 'h-6 w-6', stroke = 'currentColor', fill = 'none', onClick = () => {} }) {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
className={className}
fill={fill}
viewBox="0 0 24 24"
stroke={stroke}
onClick={onClick}
>
<path
d="M14,3V5H17.59L7.76,14.83L9.17,16.24L19,6.41V10H21V3M19,19H5V5H12V3H5C3.89,3 3,3.9 3,5V19A2,2 0 0,0 5,21H19A2,2 0 0,0 21,19V12H19V19Z"
/>
</svg>
);
}
export default memo(WebUI);

View File

@@ -353,7 +353,6 @@ ${Object.keys(objectMaskPoints)
snap={snap} snap={snap}
width={width} width={width}
height={height} height={height}
setError={setError}
/> />
</div> </div>
<div className="max-w-xs"> <div className="max-w-xs">
@@ -435,7 +434,7 @@ function boundedSize(value, maxValue, snap) {
return newValue; return newValue;
} }
function EditableMask({ onChange, points, scale, snap, width, height, setError }) { function EditableMask({ onChange, points, scale, snap, width, height }) {
const boundingRef = useRef(null); const boundingRef = useRef(null);
const handleMovePoint = useCallback( const handleMovePoint = useCallback(
@@ -456,11 +455,6 @@ function EditableMask({ onChange, points, scale, snap, width, height, setError }
// Add a new point between the closest two other points // Add a new point between the closest two other points
const handleAddPoint = useCallback( const handleAddPoint = useCallback(
(event) => { (event) => {
if (!points) {
setError('You must choose an item to edit or add a new item before adding a point.');
return
}
const { offsetX, offsetY } = event; const { offsetX, offsetY } = event;
const scaledX = boundedSize((offsetX - MaskInset) / scale, width, snap); const scaledX = boundedSize((offsetX - MaskInset) / scale, width, snap);
const scaledY = boundedSize((offsetY - MaskInset) / scale, height, snap); const scaledY = boundedSize((offsetY - MaskInset) / scale, height, snap);
@@ -480,7 +474,7 @@ function EditableMask({ onChange, points, scale, snap, width, height, setError }
newPoints.splice(index, 0, newPoint); newPoints.splice(index, 0, newPoint);
onChange(newPoints); onChange(newPoints);
}, },
[height, width, scale, points, onChange, snap, setError] [height, width, scale, points, onChange, snap]
); );
const handleRemovePoint = useCallback( const handleRemovePoint = useCallback(

View File

@@ -11,7 +11,6 @@ import axios from 'axios';
import { useState, useRef, useCallback, useMemo } from 'preact/hooks'; import { useState, useRef, useCallback, useMemo } from 'preact/hooks';
import VideoPlayer from '../components/VideoPlayer'; import VideoPlayer from '../components/VideoPlayer';
import { StarRecording } from '../icons/StarRecording'; import { StarRecording } from '../icons/StarRecording';
import { Submitted } from '../icons/Submitted';
import { Snapshot } from '../icons/Snapshot'; import { Snapshot } from '../icons/Snapshot';
import { UploadPlus } from '../icons/UploadPlus'; import { UploadPlus } from '../icons/UploadPlus';
import { Clip } from '../icons/Clip'; import { Clip } from '../icons/Clip';
@@ -64,7 +63,6 @@ export default function Events({ path, ...props }) {
time_range: '00:00,24:00', time_range: '00:00,24:00',
timezone, timezone,
favorites: props.favorites ?? 0, favorites: props.favorites ?? 0,
is_submitted: props.is_submitted ?? -1,
event: props.event, event: props.event,
}); });
const [state, setState] = useState({ const [state, setState] = useState({
@@ -123,21 +121,12 @@ export default function Events({ path, ...props }) {
[searchParams] [searchParams]
); );
const { data: ongoingEvents, mutate: refreshOngoingEvents } = useSWR([ const { data: ongoingEvents, mutate: refreshOngoingEvents } = useSWR(['events', { in_progress: 1, include_thumbnails: 0 }]);
'events', const { data: eventPages, mutate: refreshEvents, size, setSize, isValidating } = useSWRInfinite(getKey, eventsFetcher);
{ in_progress: 1, include_thumbnails: 0 },
]);
const {
data: eventPages,
mutate: refreshEvents,
size,
setSize,
isValidating,
} = useSWRInfinite(getKey, eventsFetcher);
const mutate = () => { const mutate = () => {
refreshEvents(); refreshEvents();
refreshOngoingEvents(); refreshOngoingEvents();
}; }
const { data: allLabels } = useSWR(['labels']); const { data: allLabels } = useSWR(['labels']);
const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]); const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]);
@@ -292,13 +281,6 @@ export default function Events({ path, ...props }) {
[path, searchParams, setSearchParams] [path, searchParams, setSearchParams]
); );
const onClickFilterSubmitted = useCallback(() => {
if (++searchParams.is_submitted > 1) {
searchParams.is_submitted = -1;
}
onFilter('is_submitted', searchParams.is_submitted);
}, [searchParams, onFilter]);
const isDone = (eventPages?.[eventPages.length - 1]?.length ?? 0) < API_LIMIT; const isDone = (eventPages?.[eventPages.length - 1]?.length ?? 0) < API_LIMIT;
// hooks for infinite scroll // hooks for infinite scroll
@@ -412,22 +394,11 @@ export default function Events({ path, ...props }) {
</Button> </Button>
)} )}
<div className="ml-auto flex">
{config.plus.enabled && (
<Submitted
className="h-10 w-10 text-yellow-300 cursor-pointer ml-auto"
onClick={() => onClickFilterSubmitted()}
inner_fill={searchParams.is_submitted == 1 ? 'currentColor' : 'gray'}
outer_stroke={searchParams.is_submitted >= 0 ? 'currentColor' : 'gray'}
/>
)}
<StarRecording <StarRecording
className="h-10 w-10 text-yellow-300 cursor-pointer ml-auto" className="h-10 w-10 text-yellow-300 cursor-pointer ml-auto"
onClick={() => onFilter('favorites', searchParams.favorites ? 0 : 1)} onClick={() => onFilter('favorites', searchParams.favorites ? 0 : 1)}
fill={searchParams.favorites == 1 ? 'currentColor' : 'none'} fill={searchParams.favorites == 1 ? 'currentColor' : 'none'}
/> />
</div>
<div ref={datePicker} className="ml-right"> <div ref={datePicker} className="ml-right">
<CalendarIcon <CalendarIcon
@@ -924,7 +895,7 @@ function Event({
className="flex-grow-0" className="flex-grow-0"
src={ src={
event.has_snapshot event.has_snapshot
? `${apiHost}api/events/${event.id}/snapshot.jpg?bbox=1` ? `${apiHost}api/events/${event.id}/snapshot.jpg`
: `${apiHost}api/events/${event.id}/thumbnail.jpg` : `${apiHost}api/events/${event.id}/thumbnail.jpg`
} }
alt={`${event.label} at ${((event?.data?.top_score || event.top_score) * 100).toFixed( alt={`${event.label} at ${((event?.data?.top_score || event.top_score) * 100).toFixed(

View File

@@ -213,7 +213,7 @@ export default function Export() {
</div> </div>
{exports && ( {exports && (
<div className="p-4 bg-gray-200 dark:bg-gray-800 xl:w-1/2"> <div className="p-4 bg-gray-800 xl:w-1/2">
<Heading size="md">Exports</Heading> <Heading size="md">Exports</Heading>
<Exports <Exports
exports={exports} exports={exports}
@@ -231,7 +231,7 @@ function Exports({ exports, onSetClip, onDeleteClip }) {
return ( return (
<Fragment> <Fragment>
{exports.map((item) => ( {exports.map((item) => (
<div className="my-4 p-4 bg-gray-100 dark:bg-gray-700" key={item.name}> <div className="my-4 p-4 bg-gray-700" key={item.name}>
{item.name.startsWith('in_progress') ? ( {item.name.startsWith('in_progress') ? (
<div className="flex justify-start text-center items-center"> <div className="flex justify-start text-center items-center">
<div> <div>

View File

@@ -12,7 +12,6 @@ import Dialog from '../components/Dialog';
import TimeAgo from '../components/TimeAgo'; import TimeAgo from '../components/TimeAgo';
import copy from 'copy-to-clipboard'; import copy from 'copy-to-clipboard';
import { About } from '../icons/About'; import { About } from '../icons/About';
import { WebUI } from '../icons/WebUI';
const emptyObject = Object.freeze({}); const emptyObject = Object.freeze({});
@@ -41,7 +40,7 @@ export default function System() {
const cameraNames = Object.keys(cameras || emptyObject); const cameraNames = Object.keys(cameras || emptyObject);
const processesNames = Object.keys(processes || emptyObject); const processesNames = Object.keys(processes || emptyObject);
const { data: go2rtc } = useSWR('go2rtc/api'); const { data: go2rtc } = useSWR('go2rtc');
const onHandleFfprobe = async (camera, e) => { const onHandleFfprobe = async (camera, e) => {
if (e) { if (e) {
@@ -103,9 +102,9 @@ export default function System() {
className="text-blue-500 hover:underline" className="text-blue-500 hover:underline"
target="_blank" target="_blank"
rel="noopener noreferrer" rel="noopener noreferrer"
href="/api/go2rtc/streams" href="/live/webrtc/"
> >
streams info dashboard
</Link> </Link>
</span> </span>
)} )}
@@ -302,16 +301,16 @@ export default function System() {
<Tr> <Tr>
<Th>GPU %</Th> <Th>GPU %</Th>
<Th>Memory %</Th> <Th>Memory %</Th>
{'dec' in gpu_usages[gpu] && <Th>Decoder %</Th>} {'dec' in gpu_usages[gpu] && (<Th>Decoder %</Th>)}
{'enc' in gpu_usages[gpu] && <Th>Encoder %</Th>} {'enc' in gpu_usages[gpu] && (<Th>Encoder %</Th>)}
</Tr> </Tr>
</Thead> </Thead>
<Tbody> <Tbody>
<Tr> <Tr>
<Td>{gpu_usages[gpu]['gpu']}</Td> <Td>{gpu_usages[gpu]['gpu']}</Td>
<Td>{gpu_usages[gpu]['mem']}</Td> <Td>{gpu_usages[gpu]['mem']}</Td>
{'dec' in gpu_usages[gpu] && <Td>{gpu_usages[gpu]['dec']}</Td>} {'dec' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['dec']}</Td>)}
{'enc' in gpu_usages[gpu] && <Td>{gpu_usages[gpu]['enc']}</Td>} {'enc' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['enc']}</Td>)}
</Tr> </Tr>
</Tbody> </Tbody>
</Table> </Table>
@@ -348,17 +347,7 @@ export default function System() {
> >
<div className="capitalize text-lg flex justify-between p-4"> <div className="capitalize text-lg flex justify-between p-4">
<Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link> <Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link>
<div className="flex"> <Button onClick={(e) => onHandleFfprobe(camera, e)}>ffprobe</Button>
{config.cameras[camera]['webui_url'] && (
<Button href={config.cameras[camera]['webui_url']} target="_blank">
Web UI
<WebUI className="ml-1 h-4 w-4" fill="white" stroke="white" />
</Button>
)}
<Button className="ml-2" onClick={(e) => onHandleFfprobe(camera, e)}>
ffprobe
</Button>
</div>
</div> </div>
<div className="p-2"> <div className="p-2">
<Table className="w-full"> <Table className="w-full">

View File

@@ -223,13 +223,6 @@ const getUTCOffset = (date: Date, timezone: string): number => {
// locale of en-CA is required for proper locale format // locale of en-CA is required for proper locale format
let iso = utcDate.toLocaleString('en-CA', { timeZone: timezone, hour12: false }).replace(', ', 'T'); let iso = utcDate.toLocaleString('en-CA', { timeZone: timezone, hour12: false }).replace(', ', 'T');
iso += `.${utcDate.getMilliseconds().toString().padStart(3, '0')}`; iso += `.${utcDate.getMilliseconds().toString().padStart(3, '0')}`;
let target = new Date(`${iso}Z`); const target = new Date(`${iso}Z`);
// safari doesn't like the default format
if (isNaN(target.getTime())) {
iso = iso.replace("T", " ").split(".")[0];
target = new Date(`${iso}+000`);
}
return (target.getTime() - utcDate.getTime()) / 60 / 1000; return (target.getTime() - utcDate.getTime()) / 60 / 1000;
}; };

View File

@@ -13,22 +13,7 @@ export default defineConfig({
proxy: { proxy: {
'/api': { '/api': {
target: 'http://localhost:5000' target: 'http://localhost:5000'
}, }
'/vod': {
target: 'http://localhost:5000'
},
'/exports': {
target: 'http://localhost:5000'
},
'/ws': {
target: 'ws://localhost:5000',
ws: true,
},
'/live': {
target: 'ws://localhost:5000',
changeOrigin: true,
ws: true,
},
} }
}, },
plugins: [ plugins: [