Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
859a3a0e6b Bump axios from 1.7.3 to 1.7.4 in /web
Bumps [axios](https://github.com/axios/axios) from 1.7.3 to 1.7.4.
- [Release notes](https://github.com/axios/axios/releases)
- [Changelog](https://github.com/axios/axios/blob/v1.x/CHANGELOG.md)
- [Commits](https://github.com/axios/axios/compare/v1.7.3...v1.7.4)

---
updated-dependencies:
- dependency-name: axios
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-09-18 18:19:26 +00:00
52 changed files with 871 additions and 764 deletions

View File

@@ -52,8 +52,7 @@
"csstools.postcss",
"blanu.vscode-styled-jsx",
"bradlc.vscode-tailwindcss",
"charliermarsh.ruff",
"eamodio.gitlens"
"charliermarsh.ruff"
],
"settings": {
"remote.autoForwardPorts": false,

View File

@@ -179,18 +179,57 @@ jobs:
h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
#- name: AMD/ROCm general build
# env:
# AMDGPU: gfx
# HSA_OVERRIDE: 0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
# *.cache-from=type=gha
#- name: AMD/ROCm gfx900
# env:
# AMDGPU: gfx900
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 9.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1030
# env:
# AMDGPU: gfx1030
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 10.3.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1100
# env:
# AMDGPU: gfx1100
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 11.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
# *.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:

3
.gitignore vendored
View File

@@ -1,6 +1,5 @@
.DS_Store
__pycache__
.mypy_cache
*.pyc
*.swp
debug
.vscode/*

View File

@@ -40,34 +40,35 @@ apt-get -qq install --no-install-recommends --no-install-suggests -y \
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
mkdir -p /usr/lib/ffmpeg/5.0
mkdir -p /usr/lib/ffmpeg/7.0
mkdir -p /usr/lib/ffmpeg/6.0
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-08-31-12-50/ffmpeg-n6.1.2-2-gb534cc666e-linux64-gpl-6.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/6.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/6.0/doc /usr/lib/ffmpeg/6.0/bin/ffplay
fi
# ffmpeg -> arm64
if [[ "${TARGETARCH}" == "arm64" ]]; then
mkdir -p /usr/lib/ffmpeg/5.0
mkdir -p /usr/lib/ffmpeg/7.0
mkdir -p /usr/lib/ffmpeg/6.0
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-08-31-12-50/ffmpeg-n6.1.2-2-gb534cc666e-linuxarm64-gpl-6.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/6.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/6.0/doc /usr/lib/ffmpeg/6.0/bin/ffplay
fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# use debian bookworm for amd / intel-i965 driver packages
# use debian bookworm for hwaccel packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver intel-gpu-tools onevpl-tools \
intel-opencl-icd intel-media-va-driver-non-free i965-va-driver \
libmfx-gen1.2 libmfx1 onevpl-tools intel-gpu-tools \
libva-drm2 \
mesa-va-drivers radeontop
@@ -76,22 +77,11 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
i965-va-driver-shaders
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use intel apt intel packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
libmfx1 libmfxgen1 libvpl2
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libva-drm2 mesa-va-drivers radeontop
libva-drm2 mesa-va-drivers
fi
# install vulkan

View File

@@ -1,3 +1,3 @@
# ONNX
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'
onnxruntime-openvino == 1.18.* ; platform_machine == 'x86_64'
onnxruntime == 1.18.* ; platform_machine == 'aarch64'

View File

@@ -15,10 +15,12 @@ peewee_migrate == 1.13.*
psutil == 5.9.*
pydantic == 2.8.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
pytz == 2024.2
PyYAML == 6.0.*
pytz == 2024.1
pyzmq == 26.2.*
ruamel.yaml == 0.18.*
tzlocal == 5.2
types-PyYAML == 6.0.*
requests == 2.32.*
types-requests == 2.32.*
scipy == 1.13.*
@@ -26,8 +28,8 @@ norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
# OpenVino (ONNX installed in wheels-post)
openvino == 2024.3.*
# OpenVino & ONNX
openvino == 2024.1.*
# Embeddings
chromadb == 0.5.0
onnx_clip == 4.0.*

View File

@@ -6,19 +6,16 @@ import shutil
import sys
from pathlib import Path
from ruamel.yaml import YAML
import yaml
sys.path.insert(0, "/opt/frigate")
from frigate.const import (
BIRDSEYE_PIPE,
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
from frigate.const import BIRDSEYE_PIPE # noqa: E402
from frigate.ffmpeg_presets import ( # noqa: E402
parse_preset_hardware_acceleration_encode,
)
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
sys.path.remove("/opt/frigate")
yaml = YAML()
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
@@ -41,7 +38,7 @@ try:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.load(raw_config)
config: dict[str, any] = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:
@@ -113,11 +110,13 @@ else:
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
if shutil.which("ffmpeg") is None:
ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
ffmpeg_path = "/usr/lib/ffmpeg/6.0/bin/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
elif path in INCLUDED_FFMPEG_VERSIONS:
ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
elif path == "6.0":
ffmpeg_path = "/usr/lib/ffmpeg/6.0/bin/ffmpeg"
elif path == "5.0":
ffmpeg_path = "/usr/lib/ffmpeg/5.0/bin/ffmpeg"
else:
ffmpeg_path = f"{path}/bin/ffmpeg"

View File

@@ -3,9 +3,7 @@
import json
import os
from ruamel.yaml import YAML
yaml = YAML()
import yaml
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
@@ -19,7 +17,7 @@ try:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.load(raw_config)
config: dict[str, any] = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:

View File

@@ -3,9 +3,7 @@
import json
import os
from ruamel.yaml import YAML
yaml = YAML()
import yaml
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
@@ -19,7 +17,7 @@ try:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.load(raw_config)
config: dict[str, any] = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:

View File

@@ -24,4 +24,3 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@@ -23,11 +23,11 @@ COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
RUN apt-get -y install --no-install-recommends migraphx
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
@@ -69,11 +69,7 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
COPY docker/rocm/rootfs/ /
#######################################################################
FROM scratch AS rocm-dist
@@ -105,3 +101,6 @@ ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION
#######################################################################
FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps
# Request yolov8 download at startup
ENV DOWNLOAD_YOLOV8=1

View File

@@ -1 +0,0 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl

View File

@@ -0,0 +1,20 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Compile YoloV8 ONNX files into ROCm MIGraphX files
OVERRIDE=$(cd /opt/frigate && python3 -c 'import frigate.detectors.plugins.rocm as rocm; print(rocm.auto_override_gfx_version())')
if ! test -z "$OVERRIDE"; then
echo "Using HSA_OVERRIDE_GFX_VERSION=${OVERRIDE}"
export HSA_OVERRIDE_GFX_VERSION=$OVERRIDE
fi
for onnx in /config/model_cache/yolov8/*.onnx
do
mxr="${onnx%.onnx}.mxr"
if ! test -f $mxr; then
echo "processing $onnx into $mxr"
/opt/rocm/bin/migraphx-driver compile $onnx --optimize --gpu --enable-offload-copy --binary -o $mxr
fi
done

View File

@@ -0,0 +1 @@
oneshot

View File

@@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/compile-rocm-models/run

View File

@@ -3,6 +3,8 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
# Make this a separate target so it can be built/cached optionally
FROM wheels as trt-wheels
ARG DEBIAN_FRONTEND
@@ -13,7 +15,7 @@ COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
# Build CuDNN
FROM wget AS cudnn-deps
FROM ${TRT_BASE} AS cudnn-deps
ARG COMPUTE_LEVEL

View File

@@ -65,33 +65,24 @@ Or map in all the `/dev/video*` devices.
## Intel-based CPUs
**Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------ | ----------------------------------- |
| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | |
:::note
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html to figure out what generation your CPU is.)
:::
### Via VAAPI
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs.
```yaml
ffmpeg:
hwaccel_args: preset-vaapi
```
### Via Quicksync
:::note
With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
:::
### Via Quicksync (>=10th Generation only)
If VAAPI does not work for you, you can try QSV if your processor supports it. QSV must be set specifically based on the video encoding of the stream.
#### H.264 streams

View File

@@ -9,11 +9,6 @@ Frigate supports multiple different detectors that work on different types of ha
**Most Hardware**
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
**AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.
**Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
@@ -21,7 +16,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Nvidia**
- [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX is configured.
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
@@ -317,121 +312,6 @@ model:
height: 320
```
## AMD/ROCm GPU detector
### Setup
The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access
ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When docker or frigate is not run under root then also `video` (and possibly `render` and `ssl/_ssl`) groups should be added.
When running docker directly the following flags should be added for device access:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
```yaml
services:
frigate:
---
devices:
- /dev/dri
- /dev/kfd
```
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
### Docker settings for overriding the GPU chipset
Your GPU might work just fine without any special configuration but in many cases they need manual settings. AMD/ROCm software stack comes with a limited set of GPU drivers and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.
Also AMD/ROCm does not "officially" support integrated GPUs. It still does work with most of them just fine but requires special settings. One has to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.
For the rocm frigate build there is some automatic detection:
- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0
If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from command line as:
```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
```yaml
services:
frigate:
...
environment:
HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```
Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
- first make sure that rocm environment is running properly by running `/opt/rocm/bin/rocminfo` in the frigate container -- it should list both the CPU and the GPU with their properties
- find the chipset version you have (gfxNNN) from the output of the `rocminfo` (see below)
- use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION")
- override the `HSA_OVERRIDE_GFX_VERSION` with relevant value
- if things are not working check the frigate docker logs
#### Figuring out if AMD/ROCm is working and found your GPU
```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```
#### Figuring out your AMD GPU chipset version:
We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:
```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### Supported Models
There is no default model provided, the following formats are supported:
#### YOLO-NAS
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
:::
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
onnx:
type: rocm
model:
model_type: yolonas
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## ONNX
ONNX is an open format for building machine learning models, Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.
@@ -595,7 +475,7 @@ $ cat /sys/kernel/debug/rknpu/load
## Hailo-8l
This detector is available for use with Hailo-8 AI Acceleration Module.
This detector is available if you are using the Raspberry Pi 5 with Hailo-8L AI Kit. This has not been tested using the Hailo-8L with other hardware.
### Configuration

View File

@@ -87,10 +87,6 @@ Inference speeds will vary greatly depending on the GPU and the model used.
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000 | ~ 12 ms |
#### AMD GPUs
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs.
### Community Supported:
#### Nvidia Jetson

View File

@@ -248,7 +248,7 @@ def config_save():
# Validate the config schema
try:
FrigateConfig.parse_yaml(new_config)
FrigateConfig.parse_raw(new_config)
except Exception:
return make_response(
jsonify(
@@ -336,7 +336,7 @@ def config_set():
f.close()
# Validate the config schema
try:
config_obj = FrigateConfig.parse_yaml(new_raw_config)
config_obj = FrigateConfig.parse_raw(new_raw_config)
except Exception:
with open(config_file, "w") as f:
f.write(old_raw_config)
@@ -361,8 +361,8 @@ def config_set():
json = request.get_json(silent=True) or {}
if json.get("requires_restart", 1) == 0:
current_app.frigate_config = FrigateConfig.parse_object(
config_obj, plus_api=current_app.plus_api
current_app.frigate_config = FrigateConfig.runtime_config(
config_obj, current_app.plus_api
)
return make_response(

View File

@@ -353,10 +353,7 @@ def events_search():
after = request.args.get("after", type=float)
before = request.args.get("before", type=float)
# for similarity search
event_id = request.args.get("event_id", type=str)
if not query and not event_id:
if not query:
return make_response(
jsonify(
{
@@ -435,7 +432,7 @@ def events_search():
if search_type == "similarity":
# Grab the ids of events that match the thumbnail image embeddings
try:
search_event: Event = Event.get(Event.id == event_id)
search_event: Event = Event.get(Event.id == query)
except DoesNotExist:
return make_response(
jsonify(

View File

@@ -129,7 +129,8 @@ class FrigateApp:
# check if the config file needs to be migrated
migrate_frigate_config(config_file)
self.config = FrigateConfig.parse_file(config_file, plus_api=self.plus_api)
user_config = FrigateConfig.parse_file(config_file)
self.config = user_config.runtime_config(self.plus_api)
for camera_name in self.config.cameras.keys():
# create camera_metrics

View File

@@ -6,11 +6,10 @@ import os
import shutil
from enum import Enum
from pathlib import Path
from typing import Annotated, Any, Dict, List, Optional, Tuple, Union
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from pydantic import (
AfterValidator,
BaseModel,
ConfigDict,
Field,
@@ -18,11 +17,8 @@ from pydantic import (
ValidationInfo,
field_serializer,
field_validator,
model_validator,
)
from pydantic.fields import PrivateAttr
from ruamel.yaml import YAML
from typing_extensions import Self
from frigate.const import (
ALL_ATTRIBUTE_LABELS,
@@ -30,12 +26,10 @@ from frigate.const import (
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
DEFAULT_DB_PATH,
DEFAULT_FFMPEG_VERSION,
FREQUENCY_STATS_POINTS,
INCLUDED_FFMPEG_VERSIONS,
MAX_PRE_CAPTURE,
REGEX_CAMERA_NAME,
REGEX_JSON,
YAML_EXT,
)
from frigate.detectors import DetectorConfig, ModelConfig
from frigate.detectors.detector_config import BaseDetectorConfig
@@ -45,11 +39,13 @@ from frigate.ffmpeg_presets import (
parse_preset_input,
parse_preset_output_record,
)
from frigate.plus import PlusApi
from frigate.util.builtin import (
deep_merge,
escape_special_characters,
generate_color_palette,
get_ffmpeg_arg_list,
load_config_with_no_duplicates,
)
from frigate.util.config import StreamInfoRetriever, get_relative_coordinates
from frigate.util.image import create_mask
@@ -57,8 +53,6 @@ from frigate.util.services import auto_detect_hwaccel
logger = logging.getLogger(__name__)
yaml = YAML()
# TODO: Identify what the default format to display timestamps is
DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
# German Style:
@@ -107,13 +101,6 @@ class DateTimeStyleEnum(str, Enum):
short = "short"
def validate_env_string(v: str) -> str:
return v.format(**FRIGATE_ENV_VARS)
EnvString = Annotated[str, AfterValidator(validate_env_string)]
class UIConfig(FrigateBaseModel):
timezone: Optional[str] = Field(default=None, title="Override UI timezone.")
time_format: TimeFormatEnum = Field(
@@ -148,7 +135,7 @@ class ProxyConfig(FrigateBaseModel):
logout_url: Optional[str] = Field(
default=None, title="Redirect url for logging out with proxy."
)
auth_secret: Optional[EnvString] = Field(
auth_secret: Optional[str] = Field(
default=None,
title="Secret value for proxy authentication.",
)
@@ -219,10 +206,8 @@ class MqttConfig(FrigateBaseModel):
stats_interval: int = Field(
default=60, ge=FREQUENCY_STATS_POINTS, title="MQTT Camera Stats Interval"
)
user: Optional[EnvString] = Field(None, title="MQTT Username")
password: Optional[EnvString] = Field(
None, title="MQTT Password", validate_default=True
)
user: Optional[str] = Field(None, title="MQTT Username")
password: Optional[str] = Field(None, title="MQTT Password", validate_default=True)
tls_ca_certs: Optional[str] = Field(None, title="MQTT TLS CA Certificates")
tls_client_cert: Optional[str] = Field(None, title="MQTT TLS Client Certificate")
tls_client_key: Optional[str] = Field(None, title="MQTT TLS Client Key")
@@ -297,8 +282,8 @@ class PtzAutotrackConfig(FrigateBaseModel):
class OnvifConfig(FrigateBaseModel):
host: str = Field(default="", title="Onvif Host")
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(None, title="Onvif Username")
password: Optional[EnvString] = Field(None, title="Onvif Password")
user: Optional[str] = Field(None, title="Onvif Username")
password: Optional[str] = Field(None, title="Onvif Password")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",
@@ -769,7 +754,7 @@ class GenAIConfig(FrigateBaseModel):
default=GenAIProviderEnum.openai, title="GenAI provider."
)
base_url: Optional[str] = Field(None, title="Provider base url.")
api_key: Optional[EnvString] = Field(None, title="Provider API key.")
api_key: Optional[str] = Field(None, title="Provider API key.")
model: str = Field(default="gpt-4o", title="GenAI model.")
prompt: str = Field(
default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
@@ -911,23 +896,27 @@ class FfmpegConfig(FrigateBaseModel):
def ffmpeg_path(self) -> str:
if self.path == "default":
if shutil.which("ffmpeg") is None:
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
return "/usr/lib/ffmpeg/6.0/bin/ffmpeg"
else:
return "ffmpeg"
elif self.path in INCLUDED_FFMPEG_VERSIONS:
return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
elif self.path == "6.0":
return "/usr/lib/ffmpeg/6.0/bin/ffmpeg"
elif self.path == "5.0":
return "/usr/lib/ffmpeg/5.0/bin/ffmpeg"
else:
return f"{self.path}/bin/ffmpeg"
@property
def ffprobe_path(self) -> str:
if self.path == "default":
if shutil.which("ffprobe") is None:
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59:
return "/usr/lib/ffmpeg/6.0/bin/ffprobe"
else:
return "ffprobe"
elif self.path in INCLUDED_FFMPEG_VERSIONS:
return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
elif self.path == "6.0":
return "/usr/lib/ffmpeg/6.0/bin/ffprobe"
elif self.path == "5.0":
return "/usr/lib/ffmpeg/5.0/bin/ffprobe"
else:
return f"{self.path}/bin/ffprobe"
@@ -939,7 +928,7 @@ class CameraRoleEnum(str, Enum):
class CameraInput(FrigateBaseModel):
path: EnvString = Field(title="Camera input path.")
path: str = Field(title="Camera input path.")
roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.")
global_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg global arguments."
@@ -1359,15 +1348,17 @@ def verify_recording_segments_setup_with_reasonable_time(
if record_args[0].startswith("preset"):
return
try:
seg_arg_index = record_args.index("-segment_time")
except ValueError:
raise ValueError(f"Camera {camera_config.name} has no segment_time in \
recording output args, segment args are required for record.")
seg_arg_index = record_args.index("-segment_time")
if seg_arg_index < 0:
raise ValueError(
f"Camera {camera_config.name} has no segment_time in recording output args, segment args are required for record."
)
if int(record_args[seg_arg_index + 1]) > 60:
raise ValueError(f"Camera {camera_config.name} has invalid segment_time output arg, \
segment_time must be 60 or less.")
raise ValueError(
f"Camera {camera_config.name} has invalid segment_time output arg, segment_time must be 60 or less."
)
def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
@@ -1492,28 +1483,41 @@ class FrigateConfig(FrigateBaseModel):
)
version: Optional[str] = Field(default=None, title="Current config version.")
@model_validator(mode="after")
def post_validation(self, info: ValidationInfo) -> Self:
plus_api = None
if isinstance(info.context, dict):
plus_api = info.context.get("plus_api")
def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig:
"""Merge camera config with globals."""
config = self.model_copy(deep=True)
# Proxy secret substitution
if config.proxy.auth_secret:
config.proxy.auth_secret = config.proxy.auth_secret.format(
**FRIGATE_ENV_VARS
)
# MQTT user/password substitutions
if config.mqtt.user or config.mqtt.password:
config.mqtt.user = config.mqtt.user.format(**FRIGATE_ENV_VARS)
config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
# set notifications state
self.notifications.enabled_in_config = self.notifications.enabled
config.notifications.enabled_in_config = config.notifications.enabled
# GenAI substitution
if config.genai.api_key:
config.genai.api_key = config.genai.api_key.format(**FRIGATE_ENV_VARS)
# set default min_score for object attributes
for attribute in ALL_ATTRIBUTE_LABELS:
if not self.objects.filters.get(attribute):
self.objects.filters[attribute] = FilterConfig(min_score=0.7)
elif self.objects.filters[attribute].min_score == 0.5:
self.objects.filters[attribute].min_score = 0.7
if not config.objects.filters.get(attribute):
config.objects.filters[attribute] = FilterConfig(min_score=0.7)
elif config.objects.filters[attribute].min_score == 0.5:
config.objects.filters[attribute].min_score = 0.7
# auto detect hwaccel args
if self.ffmpeg.hwaccel_args == "auto":
self.ffmpeg.hwaccel_args = auto_detect_hwaccel()
if config.ffmpeg.hwaccel_args == "auto":
config.ffmpeg.hwaccel_args = auto_detect_hwaccel()
# Global config to propagate down to camera level
global_config = self.model_dump(
global_config = config.model_dump(
include={
"audio": ...,
"birdseye": ...,
@@ -1531,7 +1535,7 @@ class FrigateConfig(FrigateBaseModel):
exclude_unset=True,
)
for name, camera in self.cameras.items():
for name, camera in config.cameras.items():
merged_config = deep_merge(
camera.model_dump(exclude_unset=True), global_config
)
@@ -1540,7 +1544,7 @@ class FrigateConfig(FrigateBaseModel):
)
if camera_config.ffmpeg.hwaccel_args == "auto":
camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
camera_config.ffmpeg.hwaccel_args = config.ffmpeg.hwaccel_args
for input in camera_config.ffmpeg.inputs:
need_record_fourcc = False and "record" in input.roles
@@ -1553,7 +1557,7 @@ class FrigateConfig(FrigateBaseModel):
stream_info = {"width": 0, "height": 0, "fourcc": None}
try:
stream_info = stream_info_retriever.get_stream_info(
self.ffmpeg, input.path
config.ffmpeg, input.path
)
except Exception:
logger.warn(
@@ -1605,6 +1609,18 @@ class FrigateConfig(FrigateBaseModel):
if camera_config.detect.stationary.interval is None:
camera_config.detect.stationary.interval = stationary_threshold
# FFMPEG input substitution
for input in camera_config.ffmpeg.inputs:
input.path = input.path.format(**FRIGATE_ENV_VARS)
# ONVIF substitution
if camera_config.onvif.user or camera_config.onvif.password:
camera_config.onvif.user = camera_config.onvif.user.format(
**FRIGATE_ENV_VARS
)
camera_config.onvif.password = camera_config.onvif.password.format(
**FRIGATE_ENV_VARS
)
# set config pre-value
camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.record.enabled_in_config = camera_config.record.enabled
@@ -1671,12 +1687,8 @@ class FrigateConfig(FrigateBaseModel):
if not camera_config.live.stream_name:
camera_config.live.stream_name = name
# generate the ffmpeg commands
camera_config.create_ffmpeg_cmds()
self.cameras[name] = camera_config
verify_config_roles(camera_config)
verify_valid_live_stream_name(self, camera_config)
verify_valid_live_stream_name(config, camera_config)
verify_recording_retention(camera_config)
verify_recording_segments_setup_with_reasonable_time(camera_config)
verify_zone_objects_are_tracked(camera_config)
@@ -1684,16 +1696,20 @@ class FrigateConfig(FrigateBaseModel):
verify_autotrack_zones(camera_config)
verify_motion_and_detect(camera_config)
# get list of unique enabled labels for tracking
enabled_labels = set(self.objects.track)
# generate the ffmpeg commands
camera_config.create_ffmpeg_cmds()
config.cameras[name] = camera_config
for camera in self.cameras.values():
# get list of unique enabled labels for tracking
enabled_labels = set(config.objects.track)
for _, camera in config.cameras.items():
enabled_labels.update(camera.objects.track)
self.model.create_colormap(sorted(enabled_labels))
self.model.check_and_load_plus_model(plus_api)
config.model.create_colormap(sorted(enabled_labels))
config.model.check_and_load_plus_model(plus_api)
for key, detector in self.detectors.items():
for key, detector in config.detectors.items():
adapter = TypeAdapter(DetectorConfig)
model_dict = (
detector
@@ -1702,10 +1718,10 @@ class FrigateConfig(FrigateBaseModel):
)
detector_config: DetectorConfig = adapter.validate_python(model_dict)
if detector_config.model is None:
detector_config.model = self.model.model_copy()
detector_config.model = config.model.model_copy()
else:
path = detector_config.model.path
detector_config.model = self.model.model_copy()
detector_config.model = config.model.model_copy()
detector_config.model.path = path
if "path" not in model_dict or len(model_dict.keys()) > 1:
@@ -1715,7 +1731,7 @@ class FrigateConfig(FrigateBaseModel):
merged_model = deep_merge(
detector_config.model.model_dump(exclude_unset=True, warnings="none"),
self.model.model_dump(exclude_unset=True, warnings="none"),
config.model.model_dump(exclude_unset=True, warnings="none"),
)
if "path" not in merged_model:
@@ -1729,9 +1745,9 @@ class FrigateConfig(FrigateBaseModel):
plus_api, detector_config.type
)
detector_config.model.compute_model_hash()
self.detectors[key] = detector_config
config.detectors[key] = detector_config
return self
return config
@field_validator("cameras")
@classmethod
@@ -1743,42 +1759,18 @@ class FrigateConfig(FrigateBaseModel):
return v
@classmethod
def parse_file(cls, config_path, **kwargs):
with open(config_path) as f:
return FrigateConfig.parse(f, **kwargs)
def parse_file(cls, config_file):
    """Load a config file from disk and validate it into a config instance.

    Args:
        config_file: path to a YAML (``.yaml``/``.yml``) or ``.json`` config file.

    Returns:
        A validated config instance built via ``cls.model_validate``.

    Raises:
        ValueError: if the file extension is neither YAML nor JSON.
        Whatever ``open``/``model_validate`` raise for I/O or validation errors.
    """
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith(YAML_EXT):
        # YAML is parsed with duplicate-key detection.
        config = load_config_with_no_duplicates(raw_config)
    elif config_file.endswith(".json"):
        config = json.loads(raw_config)
    else:
        # Previously an unknown extension fell through with `config`
        # unbound, raising a confusing NameError on the return below.
        raise ValueError(f"Unsupported config file extension: {config_file}")

    return cls.model_validate(config)
@classmethod
def parse(cls, config, *, is_json=None, **context):
# If config is a file, read its contents.
if hasattr(config, "read"):
fname = getattr(config, "name", None)
config = config.read()
# Try to guess the value of is_json from the file extension.
if is_json is None and fname:
_, ext = os.path.splitext(fname)
if ext in (".yaml", ".yml"):
is_json = False
elif ext == ".json":
is_json = True
# At this point, try to sniff the config string, to guess if it is json or not.
if is_json is None:
is_json = REGEX_JSON.match(config) is not None
# Parse the config into a dictionary.
if is_json:
config = json.load(config)
else:
config = yaml.load(config)
# Validate and return the config dict.
return cls.parse_object(config, **context)
@classmethod
def parse_object(cls, obj: Any, **context):
return cls.model_validate(obj, context=context)
@classmethod
def parse_yaml(cls, config_yaml, **context):
return cls.parse(config_yaml, is_json=False, **context)
def parse_raw(cls, raw_config):
    """Parse a raw config string and validate it into a config instance.

    NOTE(review): the string is parsed with load_config_with_no_duplicates,
    which presumably expects YAML text — confirm against callers.
    """
    config = load_config_with_no_duplicates(raw_config)
    return cls.model_validate(config)

View File

@@ -1,5 +1,3 @@
import re
CONFIG_DIR = "/config"
DEFAULT_DB_PATH = f"{CONFIG_DIR}/frigate.db"
MODEL_CACHE_DIR = f"{CONFIG_DIR}/model_cache"
@@ -9,6 +7,7 @@ RECORD_DIR = f"{BASE_DIR}/recordings"
EXPORT_DIR = f"{BASE_DIR}/exports"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
YAML_EXT = (".yaml", ".yml")
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
@@ -44,10 +43,8 @@ AUDIO_MIN_CONFIDENCE = 0.5
MAX_WAL_SIZE = 10 # MB
# Ffmpeg constants
# Ffmpeg Presets
DEFAULT_FFMPEG_VERSION = "7.0"
INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
@@ -57,7 +54,6 @@ FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"
REGEX_HTTP_CAMERA_USER_PASS = r"user=[a-zA-Z0-9_-]+&password=[\S]+"
REGEX_JSON = re.compile(r"^\s*\{")
# Known Driver Names

View File

@@ -24,6 +24,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess # Assuming this function is available
# Set up logging
logger = logging.getLogger(__name__)
@@ -145,9 +146,17 @@ class HailoDetector(DetectionApi):
f"[detect_raw] Converted tensor_input to numpy array: shape {tensor_input.shape}"
)
input_data = tensor_input
# Preprocess the tensor input using Frigate's preprocess function
processed_tensor = preprocess(
tensor_input, (1, self.h8l_model_height, self.h8l_model_width, 3), np.uint8
)
logger.debug(
f"[detect_raw] Input data for inference shape: {tensor_input.shape}, dtype: {tensor_input.dtype}"
f"[detect_raw] Tensor data and shape after preprocessing: {processed_tensor} {processed_tensor.shape}"
)
input_data = processed_tensor
logger.debug(
f"[detect_raw] Input data for inference shape: {processed_tensor.shape}, dtype: {processed_tensor.dtype}"
)
try:

View File

@@ -1,14 +1,15 @@
import logging
import os
import cv2
import numpy as np
from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
PixelFormatEnum,
)
logger = logging.getLogger(__name__)
@@ -18,7 +19,6 @@ DETECTOR_KEY = "onnx"
class ONNXDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
device: str = Field(default="AUTO", title="Device Type")
class ONNXDetector(DetectionApi):
@@ -38,11 +38,7 @@ class ONNXDetector(DetectionApi):
path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
providers = (
["CPUExecutionProvider"]
if detector_config.device == "CPU"
else ort.get_available_providers()
)
providers = ort.get_available_providers()
options = []
for provider in providers:
@@ -53,8 +49,8 @@ class ONNXDetector(DetectionApi):
options.append(
{
"trt_timing_cache_enable": True,
"trt_engine_cache_enable": True,
"trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
"trt_engine_cache_enable": True,
"trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
}
)
@@ -63,7 +59,7 @@ class ONNXDetector(DetectionApi):
options.append(
{
"cache_dir": "/config/model_cache/openvino/ort",
"device_type": detector_config.device,
"device_type": "GPU",
}
)
else:
@@ -77,13 +73,24 @@ class ONNXDetector(DetectionApi):
self.w = detector_config.model.width
self.onnx_model_type = detector_config.model.model_type
self.onnx_model_px = detector_config.model.input_pixel_format
self.onnx_model_shape = detector_config.model.input_tensor
path = detector_config.model.path
logger.info(f"ONNX: {path} loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name
model_input_shape = self.model.get_inputs()[0].shape
# adjust input shape
if self.onnx_model_type == ModelTypeEnum.yolonas:
tensor_input = cv2.dnn.blobFromImage(
tensor_input[0],
1.0,
(model_input_shape[3], model_input_shape[2]),
None,
swapRB=self.onnx_model_px == PixelFormatEnum.bgr,
).astype(np.uint8)
tensor_output = self.model.run(None, {model_input_name: tensor_input})
if self.onnx_model_type == ModelTypeEnum.yolonas:

View File

@@ -30,6 +30,12 @@ class OvDetector(DetectionApi):
self.h = detector_config.model.height
self.w = detector_config.model.width
if detector_config.device == "AUTO":
logger.warning(
"OpenVINO AUTO device type is not currently supported. Attempting to use GPU instead."
)
detector_config.device = "GPU"
if not os.path.isfile(detector_config.model.path):
logger.error(f"OpenVino model file {detector_config.model.path} not found.")
raise FileNotFoundError

View File

@@ -9,10 +9,8 @@ from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
)
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess
logger = logging.getLogger(__name__)
@@ -76,16 +74,7 @@ class ROCmDetector(DetectionApi):
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
if detector_config.conserve_cpu:
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
self.h = detector_config.model.height
self.w = detector_config.model.width
self.rocm_model_type = detector_config.model.model_type
self.rocm_model_px = detector_config.model.input_pixel_format
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
@@ -95,7 +84,6 @@ class ROCmDetector(DetectionApi):
self.model = migraphx.load(mxr_path)
else:
logger.info(f"AMD/ROCm: loading model from {path}")
if path.endswith(".onnx"):
self.model = migraphx.parse_onnx(path)
elif (
@@ -107,51 +95,30 @@ class ROCmDetector(DetectionApi):
self.model = migraphx.parse_tf(path)
else:
raise Exception(f"AMD/ROCm: unknown model format {path}")
logger.info("AMD/ROCm: compiling the model")
self.model.compile(
migraphx.get_target("gpu"), offload_copy=True, fast_math=True
)
logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
os.makedirs("/config/model_cache/rocm", exist_ok=True)
migraphx.save(self.model, mxr_path)
logger.info("AMD/ROCm: model loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_parameter_names()[0]
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
model_input_shape = tuple(
self.model.get_parameter_shapes()[model_input_name].lens()
)
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
# ruff: noqa: F841
tensor_output = np.ctypeslib.as_array(
addr, shape=detector_result.get_shape().lens()
)
if self.rocm_model_type == ModelTypeEnum.yolonas:
predictions = tensor_output
detections = np.zeros((20, 6), np.float32)
for i, prediction in enumerate(predictions):
if i == 20:
break
(_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
# when running in GPU mode, empty predictions in the output have class_id of -1
if class_id < 0:
break
detections[i] = [
class_id,
confidence,
y_min / self.h,
x_min / self.w,
y_max / self.h,
x_max / self.w,
]
return detections
else:
raise Exception(
f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
)
raise Exception(
"No models are currently supported for rocm. See the docs for more info."
)

36
frigate/detectors/util.py Normal file
View File

@@ -0,0 +1,36 @@
import logging
import cv2
import numpy as np
logger = logging.getLogger(__name__)
def preprocess(tensor_input, model_input_shape, model_input_element_type):
    """Adapt a uint8 frame tensor to the layout/dtype a detector model expects.

    Args:
        tensor_input: uint8 image data, HWC or NHWC (a leading batch axis is
            added when missing).
        model_input_shape: the model's expected input shape. For the float32
            path this is indexed as NCHW (``[2:4]`` = height/width) — assumes
            the model takes channels-first input; TODO confirm for new callers.
        model_input_element_type: np.uint8 or np.float32; anything else fails
            the assertion below.

    Returns:
        For uint8 models: the (possibly batched) input unchanged.
        For float32 models: a channels-first blob scaled by 1/255 via
        ``cv2.dnn.blobFromImage``.
    """
    model_input_shape = tuple(model_input_shape)
    assert tensor_input.dtype == np.uint8, f"tensor_input.dtype: {tensor_input.dtype}"
    if len(tensor_input.shape) == 3:
        # add the missing batch dimension (HWC -> NHWC)
        tensor_input = tensor_input[np.newaxis, :]
    if model_input_element_type == np.uint8:
        # nothing to do for uint8 model input
        assert (
            model_input_shape == tensor_input.shape
        ), f"model_input_shape: {model_input_shape}, tensor_input.shape: {tensor_input.shape}"
        return tensor_input
    assert (
        model_input_element_type == np.float32
    ), f"model_input_element_type: {model_input_element_type}"
    # tensor_input must be nhwc
    assert tensor_input.shape[3] == 3, f"tensor_input.shape: {tensor_input.shape}"
    if tensor_input.shape[1:3] != model_input_shape[2:4]:
        # Logger.warn is a deprecated alias; use Logger.warning.
        logger.warning(
            f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
        )
    # cv2.dnn.blobFromImage is faster than running it through numpy
    return cv2.dnn.blobFromImage(
        tensor_input[0],
        1.0 / 255,
        (model_input_shape[3], model_input_shape[2]),
        None,
        swapRB=False,
    )

View File

@@ -91,10 +91,10 @@ PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder
"preset-rk-h264": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
@@ -185,15 +185,6 @@ def parse_preset_hardware_acceleration_scale(
else:
scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])
if (
",hwdownload,format=nv12,eq=gamma=1.05" in scale
and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
):
scale.replace(
",hwdownload,format=nv12,eq=gamma=1.05",
":format=nv12,hwdownload,format=nv12,format=yuv420p",
)
scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args)
return scale

View File

@@ -5,12 +5,12 @@ from unittest.mock import patch
import numpy as np
from pydantic import ValidationError
from ruamel.yaml.constructor import DuplicateKeyError
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors import DetectorTypeEnum
from frigate.util.builtin import deep_merge
from frigate.plus import PlusApi
from frigate.util.builtin import deep_merge, load_config_with_no_duplicates
class TestConfig(unittest.TestCase):
@@ -64,9 +64,12 @@ class TestConfig(unittest.TestCase):
def test_config_class(self):
frigate_config = FrigateConfig(**self.minimal)
assert "cpu" in frigate_config.detectors.keys()
assert frigate_config.detectors["cpu"].type == DetectorTypeEnum.cpu
assert frigate_config.detectors["cpu"].model.width == 320
assert self.minimal == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "cpu" in runtime_config.detectors.keys()
assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
assert runtime_config.detectors["cpu"].model.width == 320
@patch("frigate.detectors.detector_config.load_labels")
def test_detector_custom_model_path(self, mock_labels):
@@ -90,23 +93,24 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**(deep_merge(config, self.minimal)))
runtime_config = frigate_config.runtime_config()
assert "cpu" in frigate_config.detectors.keys()
assert "edgetpu" in frigate_config.detectors.keys()
assert "openvino" in frigate_config.detectors.keys()
assert "cpu" in runtime_config.detectors.keys()
assert "edgetpu" in runtime_config.detectors.keys()
assert "openvino" in runtime_config.detectors.keys()
assert frigate_config.detectors["cpu"].type == DetectorTypeEnum.cpu
assert frigate_config.detectors["edgetpu"].type == DetectorTypeEnum.edgetpu
assert frigate_config.detectors["openvino"].type == DetectorTypeEnum.openvino
assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
assert runtime_config.detectors["edgetpu"].type == DetectorTypeEnum.edgetpu
assert runtime_config.detectors["openvino"].type == DetectorTypeEnum.openvino
assert frigate_config.detectors["cpu"].num_threads == 3
assert frigate_config.detectors["edgetpu"].device is None
assert frigate_config.detectors["openvino"].device is None
assert runtime_config.detectors["cpu"].num_threads == 3
assert runtime_config.detectors["edgetpu"].device is None
assert runtime_config.detectors["openvino"].device is None
assert frigate_config.model.path == "/etc/hosts"
assert frigate_config.detectors["cpu"].model.path == "/cpu_model.tflite"
assert frigate_config.detectors["edgetpu"].model.path == "/edgetpu_model.tflite"
assert frigate_config.detectors["openvino"].model.path == "/etc/hosts"
assert runtime_config.model.path == "/etc/hosts"
assert runtime_config.detectors["cpu"].model.path == "/cpu_model.tflite"
assert runtime_config.detectors["edgetpu"].model.path == "/edgetpu_model.tflite"
assert runtime_config.detectors["openvino"].model.path == "/etc/hosts"
def test_invalid_mqtt_config(self):
config = {
@@ -147,9 +151,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "dog" in frigate_config.cameras["back"].objects.track
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "dog" in runtime_config.cameras["back"].objects.track
def test_override_birdseye(self):
config = {
@@ -171,10 +177,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert not frigate_config.cameras["back"].birdseye.enabled
assert frigate_config.cameras["back"].birdseye.mode is BirdseyeModeEnum.motion
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].birdseye.enabled
assert runtime_config.cameras["back"].birdseye.mode is BirdseyeModeEnum.motion
def test_override_birdseye_non_inheritable(self):
config = {
@@ -195,9 +203,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].birdseye.enabled
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].birdseye.enabled
def test_inherit_birdseye(self):
config = {
@@ -218,11 +228,13 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].birdseye.enabled
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].birdseye.enabled
assert (
frigate_config.cameras["back"].birdseye.mode is BirdseyeModeEnum.continuous
runtime_config.cameras["back"].birdseye.mode is BirdseyeModeEnum.continuous
)
def test_override_tracked_objects(self):
@@ -245,9 +257,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "cat" in frigate_config.cameras["back"].objects.track
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "cat" in runtime_config.cameras["back"].objects.track
def test_default_object_filters(self):
config = {
@@ -268,9 +282,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "dog" in frigate_config.cameras["back"].objects.filters
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "dog" in runtime_config.cameras["back"].objects.filters
def test_inherit_object_filters(self):
config = {
@@ -294,10 +310,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "dog" in frigate_config.cameras["back"].objects.filters
assert frigate_config.cameras["back"].objects.filters["dog"].threshold == 0.7
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "dog" in runtime_config.cameras["back"].objects.filters
assert runtime_config.cameras["back"].objects.filters["dog"].threshold == 0.7
def test_override_object_filters(self):
config = {
@@ -321,10 +339,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "dog" in frigate_config.cameras["back"].objects.filters
assert frigate_config.cameras["back"].objects.filters["dog"].threshold == 0.7
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "dog" in runtime_config.cameras["back"].objects.filters
assert runtime_config.cameras["back"].objects.filters["dog"].threshold == 0.7
def test_global_object_mask(self):
config = {
@@ -349,9 +369,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
back_camera = frigate_config.cameras["back"]
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
back_camera = runtime_config.cameras["back"]
assert "dog" in back_camera.objects.filters
assert len(back_camera.objects.filters["dog"].raw_mask) == 2
assert len(back_camera.objects.filters["person"].raw_mask) == 1
@@ -397,8 +419,7 @@ class TestConfig(unittest.TestCase):
},
},
}
frigate_config = FrigateConfig(**config)
frigate_config = FrigateConfig(**config).runtime_config()
assert np.array_equal(
frigate_config.cameras["explicit"].motion.mask,
frigate_config.cameras["relative"].motion.mask,
@@ -427,7 +448,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert "-rtsp_transport" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "-rtsp_transport" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
def test_ffmpeg_params_global(self):
config = {
@@ -452,9 +476,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
def test_ffmpeg_params_camera(self):
config = {
@@ -480,10 +506,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test" not in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
def test_ffmpeg_params_input(self):
config = {
@@ -513,12 +541,14 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test2" not in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test3" not in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test2" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
assert "test3" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
def test_inherit_clips_retention(self):
config = {
@@ -539,9 +569,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].record.alerts.retain.days == 20
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].record.alerts.retain.days == 20
def test_roles_listed_twice_throws_error(self):
config = {
@@ -625,12 +657,14 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert isinstance(
frigate_config.cameras["back"].zones["test"].contour, np.ndarray
runtime_config.cameras["back"].zones["test"].contour, np.ndarray
)
assert frigate_config.cameras["back"].zones["test"].color != (0, 0, 0)
assert runtime_config.cameras["back"].zones["test"].color != (0, 0, 0)
def test_zone_relative_matches_explicit(self):
config = {
@@ -665,8 +699,7 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
frigate_config = FrigateConfig(**config).runtime_config()
assert np.array_equal(
frigate_config.cameras["back"].zones["explicit"].contour,
frigate_config.cameras["back"].zones["relative"].contour,
@@ -696,7 +729,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
ffmpeg_cmds = frigate_config.cameras["back"].ffmpeg_cmds
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
ffmpeg_cmds = runtime_config.cameras["back"].ffmpeg_cmds
assert len(ffmpeg_cmds) == 1
assert "clips" not in ffmpeg_cmds[0]["roles"]
@@ -724,7 +760,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].detect.max_disappeared == 5 * 5
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].detect.max_disappeared == 5 * 5
def test_motion_frame_height_wont_go_below_120(self):
config = {
@@ -749,7 +788,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].motion.frame_height == 100
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].motion.frame_height == 100
def test_motion_contour_area_dynamic(self):
config = {
@@ -774,7 +816,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert round(frigate_config.cameras["back"].motion.contour_area) == 10
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert round(runtime_config.cameras["back"].motion.contour_area) == 10
def test_merge_labelmap(self):
config = {
@@ -800,7 +845,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.model.merged_labelmap[7] == "truck"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.model.merged_labelmap[7] == "truck"
def test_default_labelmap_empty(self):
config = {
@@ -825,7 +873,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.model.merged_labelmap[0] == "person"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.model.merged_labelmap[0] == "person"
def test_default_labelmap(self):
config = {
@@ -851,7 +902,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.model.merged_labelmap[0] == "person"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.model.merged_labelmap[0] == "person"
def test_plus_labelmap(self):
with open("/config/model_cache/test", "w") as f:
@@ -882,7 +936,10 @@ class TestConfig(unittest.TestCase):
}
frigate_config = FrigateConfig(**config)
assert frigate_config.model.merged_labelmap[0] == "amazon"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config(PlusApi())
assert runtime_config.model.merged_labelmap[0] == "amazon"
def test_fails_on_invalid_role(self):
config = {
@@ -939,7 +996,8 @@ class TestConfig(unittest.TestCase):
},
}
self.assertRaises(ValueError, lambda: FrigateConfig(**config))
frigate_config = FrigateConfig(**config)
self.assertRaises(ValueError, lambda: frigate_config.runtime_config())
def test_works_on_missing_role_multiple_cams(self):
config = {
@@ -986,7 +1044,8 @@ class TestConfig(unittest.TestCase):
},
}
FrigateConfig(**config)
frigate_config = FrigateConfig(**config)
frigate_config.runtime_config()
def test_global_detect(self):
config = {
@@ -1010,10 +1069,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].detect.max_disappeared == 1
assert frigate_config.cameras["back"].detect.height == 1080
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].detect.max_disappeared == 1
assert runtime_config.cameras["back"].detect.height == 1080
def test_default_detect(self):
config = {
@@ -1036,10 +1097,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].detect.max_disappeared == 25
assert frigate_config.cameras["back"].detect.height == 720
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].detect.max_disappeared == 25
assert runtime_config.cameras["back"].detect.height == 720
def test_global_detect_merge(self):
config = {
@@ -1063,11 +1126,13 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].detect.max_disappeared == 1
assert frigate_config.cameras["back"].detect.height == 1080
assert frigate_config.cameras["back"].detect.width == 1920
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].detect.max_disappeared == 1
assert runtime_config.cameras["back"].detect.height == 1080
assert runtime_config.cameras["back"].detect.width == 1920
def test_global_snapshots(self):
config = {
@@ -1094,10 +1159,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].snapshots.enabled
assert frigate_config.cameras["back"].snapshots.height == 100
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].snapshots.enabled
assert runtime_config.cameras["back"].snapshots.height == 100
def test_default_snapshots(self):
config = {
@@ -1120,10 +1187,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].snapshots.bounding_box
assert frigate_config.cameras["back"].snapshots.quality == 70
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].snapshots.bounding_box
assert runtime_config.cameras["back"].snapshots.quality == 70
def test_global_snapshots_merge(self):
config = {
@@ -1151,11 +1220,13 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].snapshots.bounding_box is False
assert frigate_config.cameras["back"].snapshots.height == 150
assert frigate_config.cameras["back"].snapshots.enabled
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].snapshots.bounding_box is False
assert runtime_config.cameras["back"].snapshots.height == 150
assert runtime_config.cameras["back"].snapshots.enabled
def test_global_jsmpeg(self):
config = {
@@ -1179,9 +1250,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].live.quality == 4
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].live.quality == 4
def test_default_live(self):
config = {
@@ -1204,9 +1277,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].live.quality == 8
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].live.quality == 8
def test_global_live_merge(self):
config = {
@@ -1233,10 +1308,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].live.quality == 7
assert frigate_config.cameras["back"].live.height == 480
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].live.quality == 7
assert runtime_config.cameras["back"].live.height == 480
def test_global_timestamp_style(self):
config = {
@@ -1260,9 +1337,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].timestamp_style.position == "bl"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].timestamp_style.position == "bl"
def test_default_timestamp_style(self):
config = {
@@ -1285,9 +1364,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].timestamp_style.position == "tl"
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].timestamp_style.position == "tl"
def test_global_timestamp_style_merge(self):
config = {
@@ -1312,10 +1393,12 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].timestamp_style.position == "bl"
assert frigate_config.cameras["back"].timestamp_style.thickness == 4
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].timestamp_style.position == "bl"
assert runtime_config.cameras["back"].timestamp_style.thickness == 4
def test_allow_retain_to_be_a_decimal(self):
config = {
@@ -1339,9 +1422,11 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].snapshots.retain.default == 1.5
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].snapshots.retain.default == 1.5
def test_fails_on_bad_camera_name(self):
config = {
@@ -1366,7 +1451,11 @@ class TestConfig(unittest.TestCase):
},
}
self.assertRaises(ValidationError, lambda: FrigateConfig(**config).cameras)
frigate_config = FrigateConfig(**config)
self.assertRaises(
ValidationError, lambda: frigate_config.runtime_config().cameras
)
def test_fails_on_bad_segment_time(self):
config = {
@@ -1394,9 +1483,11 @@ class TestConfig(unittest.TestCase):
},
}
frigate_config = FrigateConfig(**config)
self.assertRaises(
ValueError,
lambda: FrigateConfig(**config).ffmpeg.output_args.record,
lambda: frigate_config.runtime_config().ffmpeg.output_args.record,
)
def test_fails_zone_defines_untracked_object(self):
@@ -1428,7 +1519,9 @@ class TestConfig(unittest.TestCase):
},
}
self.assertRaises(ValueError, lambda: FrigateConfig(**config).cameras)
frigate_config = FrigateConfig(**config)
self.assertRaises(ValueError, lambda: frigate_config.runtime_config().cameras)
def test_fails_duplicate_keys(self):
raw_config = """
@@ -1444,7 +1537,7 @@ class TestConfig(unittest.TestCase):
"""
self.assertRaises(
DuplicateKeyError, lambda: FrigateConfig.parse_yaml(raw_config)
ValueError, lambda: load_config_with_no_duplicates(raw_config)
)
def test_object_filter_ratios_work(self):
@@ -1469,11 +1562,13 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert "dog" in frigate_config.cameras["back"].objects.filters
assert frigate_config.cameras["back"].objects.filters["dog"].min_ratio == 0.2
assert frigate_config.cameras["back"].objects.filters["dog"].max_ratio == 10.1
assert config == frigate_config.model_dump(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert "dog" in runtime_config.cameras["back"].objects.filters
assert runtime_config.cameras["back"].objects.filters["dog"].min_ratio == 0.2
assert runtime_config.cameras["back"].objects.filters["dog"].max_ratio == 10.1
def test_valid_movement_weights(self):
config = {
@@ -1496,9 +1591,10 @@ class TestConfig(unittest.TestCase):
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.cameras["back"].onvif.autotracking.movement_weights == [
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].onvif.autotracking.movement_weights == [
"0.0",
"1.0",
"1.23",

View File

@@ -36,13 +36,16 @@ class TestFfmpegPresets(unittest.TestCase):
}
def test_default_ffmpeg(self):
FrigateConfig(**self.default_ffmpeg)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)
def test_ffmpeg_hwaccel_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
"preset-rpi-64-h264"
)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-rpi-64-h264" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
@@ -55,6 +58,7 @@ class TestFfmpegPresets(unittest.TestCase):
"-other-hwaccel args"
)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "-other-hwaccel args" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
@@ -69,11 +73,12 @@ class TestFfmpegPresets(unittest.TestCase):
"fps": 10,
}
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-nvidia-h264" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
assert (
"fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.05"
"fps=10,scale_cuda=w=2560:h=1920:format=nv12,hwdownload,format=nv12,format=yuv420p"
in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
)
@@ -84,6 +89,8 @@ class TestFfmpegPresets(unittest.TestCase):
"preset-rtsp-generic"
)
frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
assert (
# Ignore global and user_agent args in comparison
frigate_preset_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
@@ -95,6 +102,7 @@ class TestFfmpegPresets(unittest.TestCase):
"preset-rtmp-generic"
)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-rtmp-generic" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
@@ -109,6 +117,7 @@ class TestFfmpegPresets(unittest.TestCase):
argsList = defaultArgsList + ["-some", "arg with space"]
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = argsString
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert set(argsList).issubset(
frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
)
@@ -116,6 +125,7 @@ class TestFfmpegPresets(unittest.TestCase):
def test_ffmpeg_input_not_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = "-some inputs"
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "-some inputs" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
@@ -125,6 +135,7 @@ class TestFfmpegPresets(unittest.TestCase):
"preset-record-generic-audio-aac"
)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-record-generic-audio-aac" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
@@ -134,9 +145,10 @@ class TestFfmpegPresets(unittest.TestCase):
def test_ffmpeg_output_record_not_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
"-some output -segment_time 10"
"-some output"
)
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "-some output" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)

View File

@@ -345,7 +345,7 @@ class TestHttp(unittest.TestCase):
def test_config(self):
app = create_app(
FrigateConfig(**self.minimal_config),
FrigateConfig(**self.minimal_config).runtime_config(),
self.db,
None,
None,
@@ -363,7 +363,7 @@ class TestHttp(unittest.TestCase):
def test_recordings(self):
app = create_app(
FrigateConfig(**self.minimal_config),
FrigateConfig(**self.minimal_config).runtime_config(),
self.db,
None,
None,
@@ -385,7 +385,7 @@ class TestHttp(unittest.TestCase):
stats = Mock(spec=StatsEmitter)
stats.get_latest_stats.return_value = self.test_stats
app = create_app(
FrigateConfig(**self.minimal_config),
FrigateConfig(**self.minimal_config).runtime_config(),
self.db,
None,
None,

View File

@@ -44,10 +44,9 @@ class TimelineProcessor(threading.Thread):
continue
if input_type == EventTypeEnum.tracked_object:
if prev_event_data is not None and event_data is not None:
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
elif input_type == EventTypeEnum.api:
self.handle_api_entry(camera, event_type, event_data)

View File

@@ -9,12 +9,14 @@ import queue
import re
import shlex
import urllib.parse
from collections import Counter
from collections.abc import Mapping
from pathlib import Path
from typing import Any, Optional, Tuple
import numpy as np
import pytz
import yaml
from ruamel.yaml import YAML
from tzlocal import get_localzone
from zoneinfo import ZoneInfoNotFoundError
@@ -87,6 +89,34 @@ def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dic
return merged
def load_config_with_no_duplicates(raw_config) -> dict:
"""Get config ensuring duplicate keys are not allowed."""
# https://stackoverflow.com/a/71751051
# important to use SafeLoader here to avoid RCE
class PreserveDuplicatesLoader(yaml.loader.SafeLoader):
pass
def map_constructor(loader, node, deep=False):
keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
key_count = Counter(keys)
data = {}
for key, val in zip(keys, vals):
if key_count[key] > 1:
raise ValueError(
f"Config input {key} is defined multiple times for the same field, this is not allowed."
)
else:
data[key] = val
return data
PreserveDuplicatesLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
)
return yaml.load(raw_config, PreserveDuplicatesLoader)
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
rtsp_cleaned = re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)

View File

@@ -280,7 +280,10 @@ def process(path, label, output, debug_path):
json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = c
frigate_config = FrigateConfig(**json_config)
process_clip = ProcessClip(c, frame_shape, frigate_config)
runtime_config = frigate_config.runtime_config()
runtime_config.cameras["camera"].create_ffmpeg_cmds()
process_clip = ProcessClip(c, frame_shape, runtime_config)
process_clip.load_frames()
process_clip.process_frames(object_detector, objects_to_track=[label])

9
web/package-lock.json generated
View File

@@ -31,7 +31,7 @@
"@radix-ui/react-toggle-group": "^1.1.0",
"@radix-ui/react-tooltip": "^1.1.2",
"apexcharts": "^3.52.0",
"axios": "^1.7.3",
"axios": "^1.7.4",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",
@@ -3398,10 +3398,9 @@
}
},
"node_modules/axios": {
"version": "1.7.3",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.3.tgz",
"integrity": "sha512-Ar7ND9pU99eJ9GpoGQKhKf58GpUOgnzuaB7ueNQ5BMi0p+LZ5oaEnfF999fAArcTIBwXTCHAmGcHOZJaWPq9Nw==",
"license": "MIT",
"version": "1.7.4",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.4.tgz",
"integrity": "sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",

View File

@@ -37,7 +37,7 @@
"@radix-ui/react-toggle-group": "^1.1.0",
"@radix-ui/react-tooltip": "^1.1.2",
"apexcharts": "^3.52.0",
"axios": "^1.7.3",
"axios": "^1.7.4",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",

View File

@@ -43,6 +43,7 @@ type SearchFilterGroupProps = {
className: string;
filters?: SearchFilters[];
filter?: SearchFilter;
searchTerm: string;
filterList?: FilterList;
onUpdateFilter: (filter: SearchFilter) => void;
};
@@ -50,6 +51,7 @@ export default function SearchFilterGroup({
className,
filters = DEFAULT_REVIEW_FILTERS,
filter,
searchTerm,
filterList,
onUpdateFilter,
}: SearchFilterGroupProps) {
@@ -211,7 +213,7 @@ export default function SearchFilterGroup({
)}
{config?.semantic_search?.enabled &&
filters.includes("source") &&
!filter?.search_type?.includes("similarity") && (
!searchTerm.includes("similarity:") && (
<SearchTypeButton
selectedSearchSources={
filter?.search_type ?? ["thumbnail", "description"]
@@ -912,7 +914,7 @@ export function SearchTypeContent({
<div className="my-2.5 flex flex-col gap-2.5">
<FilterSwitch
label="Thumbnail Image"
isChecked={currentSearchSources?.includes("thumbnail") ?? false}
isChecked={selectedSearchSources?.includes("thumbnail") ?? false}
onCheckedChange={(isChecked) => {
const updatedSources = currentSearchSources
? [...currentSearchSources]

View File

@@ -180,7 +180,11 @@ export default function InputWithTags({
const createFilter = useCallback(
(type: FilterType, value: string) => {
if (allSuggestions[type as FilterType]?.includes(value)) {
if (
allSuggestions[type as keyof SearchFilter]?.includes(value) ||
type === "before" ||
type === "after"
) {
const newFilters = { ...filters };
let timestamp = 0;
@@ -262,7 +266,9 @@ export default function InputWithTags({
(filterType: FilterType, filterValue: string) => {
const trimmedValue = filterValue.trim();
if (
allSuggestions[filterType]?.includes(trimmedValue) ||
allSuggestions[filterType as keyof SearchFilter]?.includes(
trimmedValue,
) ||
((filterType === "before" || filterType === "after") &&
trimmedValue.match(/^\d{8}$/))
) {
@@ -421,14 +427,14 @@ export default function InputWithTags({
}, [currentFilterType, inputValue, updateSuggestions]);
useEffect(() => {
if (filters?.search_type && filters?.search_type.includes("similarity")) {
if (search?.startsWith("similarity:")) {
setIsSimilaritySearch(true);
setInputValue("");
} else {
setIsSimilaritySearch(false);
setInputValue(search || "");
}
}, [filters, search]);
}, [search]);
return (
<>
@@ -577,57 +583,54 @@ export default function InputWithTags({
</span>
)}
{Object.entries(filters).map(([filterType, filterValues]) =>
Array.isArray(filterValues)
? filterValues
.filter(() => filterType !== "query")
.filter(() => !filterValues.includes("similarity"))
.map((value, index) => (
<span
key={`${filterType}-${index}`}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
>
{filterType.replaceAll("_", " ")}:{" "}
{value.replaceAll("_", " ")}
<button
onClick={() =>
removeFilter(filterType as FilterType, value)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${value.replaceAll("_", " ")} filter`}
>
<LuX className="h-3 w-3" />
</button>
</span>
))
: filterType !== "event_id" && (
<span
key={filterType}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
Array.isArray(filterValues) ? (
filterValues.map((value, index) => (
<span
key={`${filterType}-${index}`}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
>
{filterType.replaceAll("_", " ")}:{" "}
{value.replaceAll("_", " ")}
<button
onClick={() =>
removeFilter(filterType as FilterType, value)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${value.replaceAll("_", " ")} filter`}
>
{filterType}:
{filterType === "before" || filterType === "after"
? new Date(
(filterType === "before"
? (filterValues as number) + 1
: (filterValues as number)) * 1000,
).toLocaleDateString(
window.navigator?.language || "en-US",
)
: filterValues}
<button
onClick={() =>
removeFilter(
filterType as FilterType,
filterValues as string | number,
)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${filterValues} filter`}
>
<LuX className="h-3 w-3" />
</button>
</span>
),
<LuX className="h-3 w-3" />
</button>
</span>
))
) : (
<span
key={filterType}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
>
{filterType}:
{filterType === "before" || filterType === "after"
? new Date(
(filterType === "before"
? (filterValues as number) + 1
: (filterValues as number)) * 1000,
).toLocaleDateString(
window.navigator?.language || "en-US",
)
: filterValues}
<button
onClick={() =>
removeFilter(
filterType as FilterType,
filterValues as string | number,
)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${filterValues} filter`}
>
<LuX className="h-3 w-3" />
</button>
</span>
),
)}
</div>
</CommandGroup>

View File

@@ -121,7 +121,7 @@ export function AnnotationSettingsPane({
}
return (
<div className="mb-3 space-y-3 rounded-lg border border-secondary-foreground bg-background_alt p-2">
<div className="space-y-3 rounded-lg border border-secondary-foreground bg-background_alt p-2">
<Heading as="h4" className="my-2">
Annotation Settings
</Heading>
@@ -152,8 +152,8 @@ export function AnnotationSettingsPane({
render={({ field }) => (
<FormItem>
<FormLabel>Annotation Offset</FormLabel>
<div className="flex flex-col gap-3 md:flex-row-reverse md:gap-8">
<div className="flex flex-row items-center gap-3 rounded-lg bg-destructive/50 p-3 text-sm text-primary-variant md:my-0 md:my-5">
<div className="flex flex-col gap-8 md:flex-row-reverse">
<div className="my-5 flex flex-row items-center gap-3 rounded-lg bg-destructive/50 p-3 text-sm text-primary-variant md:my-0">
<PiWarningCircle className="size-24" />
<div>
This data comes from your camera's detect feed but is
@@ -161,7 +161,7 @@ export function AnnotationSettingsPane({
unlikely that the two streams are perfectly in sync. As a
result, the bounding box and the footage will not line up
perfectly. However, the <code>annotation_offset</code>{" "}
field can be used to adjust this.
field in your config can be used to adjust this.
<div className="mt-2 flex items-center text-primary">
<Link
to="https://docs.frigate.video/configuration/reference"

View File

@@ -357,7 +357,10 @@ export default function ObjectLifecycle({
)}
<div className="relative flex flex-col items-center justify-center">
<Carousel className="m-0 w-full" setApi={setMainApi}>
<Carousel
className={cn("m-0 w-full", fullscreen && isDesktop && "w-[75%]")}
setApi={setMainApi}
>
<CarouselContent>
{eventSequence.map((item, index) => (
<CarouselItem key={index}>

View File

@@ -234,7 +234,7 @@ export default function ReviewDetailDialog({
)}
{pane == "details" && selectedEvent && (
<div className="mt-0 flex size-full flex-col gap-2">
<div className="scrollbar-container overflow-x-none mt-0 flex size-full flex-col gap-2 overflow-y-auto overflow-x-hidden">
<ObjectLifecycle event={selectedEvent} setPane={setPane} />
</div>
)}
@@ -370,9 +370,7 @@ function EventItem({
<Chip
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
onClick={() => {
navigate(
`/explore?search_type=similarity&event_id=${event.id}`,
);
navigate(`/explore?similarity_search_id=${event.id}`);
}}
>
<FaImages className="size-4 text-white" />

View File

@@ -125,7 +125,7 @@ export function useSearchEffect(
const remove = callback(param[1]);
if (remove) {
setSearchParams(undefined, { replace: true });
setSearchParams();
}
}, [param, callback, setSearchParams]);
}

View File

@@ -31,7 +31,6 @@ function ConfigEditor() {
const editorRef = useRef<monaco.editor.IStandaloneCodeEditor | null>(null);
const modelRef = useRef<monaco.editor.ITextModel | null>(null);
const configRef = useRef<HTMLDivElement | null>(null);
const schemaConfiguredRef = useRef(false);
const onHandleSaveConfig = useCallback(
async (save_option: SaveOptions) => {
@@ -80,59 +79,50 @@ function ConfigEditor() {
return;
}
const modelUri = monaco.Uri.parse(
`a://b/api/config/schema_${Date.now()}.json`,
);
// Configure Monaco YAML schema only once
if (!schemaConfiguredRef.current) {
configureMonacoYaml(monaco, {
enableSchemaRequest: true,
hover: true,
completion: true,
validate: true,
format: true,
schemas: [
{
uri: `${apiHost}api/config/schema.json`,
fileMatch: [String(modelUri)],
},
],
});
schemaConfiguredRef.current = true;
if (modelRef.current != null) {
// we don't need to recreate the editor if it already exists
editorRef.current?.layout();
return;
}
if (!modelRef.current) {
modelRef.current = monaco.editor.createModel(config, "yaml", modelUri);
const modelUri = monaco.Uri.parse("a://b/api/config/schema.json");
if (monaco.editor.getModels().length > 0) {
modelRef.current = monaco.editor.getModel(modelUri);
} else {
modelRef.current.setValue(config);
modelRef.current = monaco.editor.createModel(config, "yaml", modelUri);
}
configureMonacoYaml(monaco, {
enableSchemaRequest: true,
hover: true,
completion: true,
validate: true,
format: true,
schemas: [
{
uri: `${apiHost}api/config/schema.json`,
fileMatch: [String(modelUri)],
},
],
});
const container = configRef.current;
if (container && !editorRef.current) {
if (container != null) {
editorRef.current = monaco.editor.create(container, {
language: "yaml",
model: modelRef.current,
scrollBeyondLastLine: false,
theme: (systemTheme || theme) == "dark" ? "vs-dark" : "vs-light",
});
} else if (editorRef.current) {
editorRef.current.setModel(modelRef.current);
}
return () => {
if (editorRef.current) {
editorRef.current.dispose();
editorRef.current = null;
}
if (modelRef.current) {
modelRef.current.dispose();
modelRef.current = null;
}
schemaConfiguredRef.current = false;
configRef.current = null;
modelRef.current = null;
};
}, [config, apiHost, systemTheme, theme]);
});
// monitoring state

View File

@@ -14,10 +14,6 @@ import {
ReviewSummary,
SegmentedReviewData,
} from "@/types/review";
import {
getBeginningOfDayTimestamp,
getEndOfDayTimestamp,
} from "@/utils/dateUtil";
import EventView from "@/views/events/EventView";
import { RecordingView } from "@/views/recording/RecordingView";
import axios from "axios";
@@ -47,17 +43,10 @@ export default function Events() {
.get(`review/${reviewId}`)
.then((resp) => {
if (resp.status == 200 && resp.data) {
const startTime = resp.data.start_time - REVIEW_PADDING;
const date = new Date(startTime * 1000);
setReviewFilter({
after: getBeginningOfDayTimestamp(date),
before: getEndOfDayTimestamp(date),
});
setRecording(
{
camera: resp.data.camera,
startTime,
startTime: resp.data.start_time - REVIEW_PADDING,
severity: resp.data.severity,
},
true,

View File

@@ -1,60 +1,93 @@
import { useApiFilterArgs } from "@/hooks/use-api-filter";
import { useCameraPreviews } from "@/hooks/use-camera-previews";
import { useOverlayState, useSearchEffect } from "@/hooks/use-overlay-state";
import { FrigateConfig } from "@/types/frigateConfig";
import { RecordingStartingPoint } from "@/types/record";
import { SearchFilter, SearchQuery, SearchResult } from "@/types/search";
import { TimeRange } from "@/types/timeline";
import { RecordingView } from "@/views/recording/RecordingView";
import SearchView from "@/views/search/SearchView";
import { useCallback, useEffect, useMemo, useState } from "react";
import { TbExclamationCircle } from "react-icons/tb";
import useSWR from "swr";
import useSWRInfinite from "swr/infinite";
const API_LIMIT = 25;
export default function Explore() {
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
// search field handler
const [search, setSearch] = useState("");
const [searchTerm, setSearchTerm] = useState("");
const [recording, setRecording] =
useOverlayState<RecordingStartingPoint>("recording");
// search filter
const similaritySearch = useMemo(() => {
if (!searchTerm.includes("similarity:")) {
return undefined;
}
return searchTerm.split(":")[1];
}, [searchTerm]);
const [searchFilter, setSearchFilter, searchSearchParams] =
useApiFilterArgs<SearchFilter>();
const searchTerm = useMemo(
() => searchSearchParams?.["query"] || "",
[searchSearchParams],
);
// search api
const similaritySearch = useMemo(
() => searchSearchParams["search_type"] == "similarity",
[searchSearchParams],
);
useSearchEffect("similarity_search_id", (similarityId) => {
setSearch(`similarity:${similarityId}`);
// @ts-expect-error we want to clear this
setSearchFilter({ ...searchFilter, similarity_search_id: undefined });
return false;
});
useEffect(() => {
if (!searchTerm && !search) {
return;
}
// switch back to normal search when query is entered
setSearchFilter({
...searchFilter,
search_type:
similaritySearch && search ? undefined : searchFilter?.search_type,
event_id: similaritySearch && search ? undefined : searchFilter?.event_id,
query: search.length > 0 ? search : undefined,
});
// only update when search is updated
// eslint-disable-next-line react-hooks/exhaustive-deps
setSearchTerm(search);
}, [search]);
const searchQuery: SearchQuery = useMemo(() => {
// no search parameters
if (searchSearchParams && Object.keys(searchSearchParams).length === 0) {
return null;
if (similaritySearch) {
return [
"events/search",
{
query: similaritySearch,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
include_thumbnails: 0,
search_type: "similarity",
},
];
}
// parameters, but no search term and not similarity
if (
searchSearchParams &&
Object.keys(searchSearchParams).length !== 0 &&
!searchTerm &&
!similaritySearch
) {
if (searchTerm) {
return [
"events/search",
{
query: searchTerm,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
search_type: searchSearchParams["search_type"],
include_thumbnails: 0,
},
];
}
if (searchSearchParams && Object.keys(searchSearchParams).length !== 0) {
return [
"events",
{
@@ -73,38 +106,15 @@ export default function Explore() {
];
}
// parameters and search term
if (!similaritySearch) {
setSearch(searchTerm);
}
return [
"events/search",
{
query: similaritySearch ? undefined : searchTerm,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
search_type: searchSearchParams["search_type"],
event_id: searchSearchParams["event_id"],
include_thumbnails: 0,
},
];
return null;
}, [searchTerm, searchSearchParams, similaritySearch]);
// paging
// usually slow only on first run while downloading models
const [isSlowLoading, setIsSlowLoading] = useState(false);
const getKey = (
pageIndex: number,
previousPageData: SearchResult[] | null,
): SearchQuery => {
if (isSlowLoading && !similaritySearch) return null;
if (previousPageData && !previousPageData.length) return null; // reached the end
if (!searchQuery) return null;
@@ -128,12 +138,6 @@ export default function Explore() {
{
revalidateFirstPage: true,
revalidateAll: false,
onLoadingSlow: () => {
if (!similaritySearch) {
setIsSlowLoading(true);
}
},
loadingTimeout: 10000,
},
);
@@ -164,40 +168,109 @@ export default function Explore() {
}
}, [isReachingEnd, isLoadingMore, setSize, size, searchResults, searchQuery]);
return (
<>
{isSlowLoading && !similaritySearch ? (
<div className="absolute inset-0 left-1/2 top-1/2 flex h-96 w-96 -translate-x-1/2 -translate-y-1/2">
<div className="flex flex-col items-center justify-center rounded-lg bg-background/50 p-5">
<p className="my-5 text-lg">Search Unavailable</p>
<TbExclamationCircle className="mb-3 size-10" />
<p className="max-w-96 text-center">
If this is your first time using Search, be patient while Frigate
downloads the necessary embeddings models. Check Frigate logs.
</p>
</div>
</div>
) : (
<SearchView
search={search}
searchTerm={searchTerm}
searchFilter={searchFilter}
searchResults={searchResults}
isLoading={(isLoadingInitialData || isLoadingMore) ?? true}
setSearch={setSearch}
setSimilaritySearch={(search) => {
setSearchFilter({
...searchFilter,
search_type: ["similarity"],
event_id: search.id,
});
}}
setSearchFilter={setSearchFilter}
onUpdateFilter={setSearchFilter}
loadMore={loadMore}
hasMore={!isReachingEnd}
/>
)}
</>
// previews
// Overall time window covered by the current results, used to fetch camera
// preview clips for the whole result set at once.
const previewTimeRange = useMemo<TimeRange>(() => {
  // Guard both undefined AND empty results: spreading an empty array into
  // Math.min / Math.max would otherwise produce Infinity / -Infinity.
  if (!searchResults || searchResults.length === 0) {
    return { after: 0, before: 0 };
  }
  return {
    after: Math.min(...searchResults.map((res) => res.start_time)),
    before: Math.max(
      // in-progress items have no end_time yet; fall back to "now"
      ...searchResults.map((res) => res.end_time ?? Date.now() / 1000),
    ),
  };
}, [searchResults]);
// Preview media for every camera across the computed time range.
// fetchPreviews gates the request until results exist; no auto-refresh since
// the range only changes when searchResults changes.
const allPreviews = useCameraPreviews(previewTimeRange, {
  autoRefresh: false,
  fetchPreviews: searchResults != undefined,
});
// selection
// Opening a result jumps to the recording view at that item's start time.
const onOpenSearch = useCallback(
  (item: SearchResult) => {
    const recordingTarget = {
      camera: item.camera,
      startTime: item.start_time,
      // recordings are always opened through the "alert" severity view
      severity: "alert" as const,
    };
    setRecording(recordingTarget);
  },
  [setRecording],
);
// Data needed to render the recording view for the opened item; undefined
// until the recording selection, config, and results are all available.
const selectedReviewData = useMemo(() => {
  if (!recording || !config || !searchResults) {
    return undefined;
  }

  // When the filter names no cameras, fall back to every configured camera.
  const allCameras = searchFilter?.cameras ?? Object.keys(config.cameras);

  return {
    camera: recording.camera,
    start_time: recording.startTime,
    allCameras,
  };

  // previews will not update after item is selected
  // eslint-disable-next-line react-hooks/exhaustive-deps
}, [recording, searchResults]);
// Two-hour playback window starting at the top of the hour that contains
// the selected recording's start time; undefined when nothing is selected.
const selectedTimeRange = useMemo(() => {
  if (!recording) {
    return undefined;
  }
  // Snap to the start of the containing hour.
  // NOTE(review): minutes are zeroed in UTC while the +2h step uses local
  // hours — differs from an all-local version only in fractional-offset
  // timezones; confirm this mix is intentional.
  const hourStart = new Date(recording.startTime * 1000);
  hourStart.setUTCMinutes(0, 0, 0);
  const after = hourStart.getTime() / 1000;
  hourStart.setHours(hourStart.getHours() + 2);
  const before = hourStart.getTime() / 1000;
  return { after, before };
}, [recording]);
// When an item has been opened, render the recording view for it; otherwise
// render the search grid.
// NOTE(review): if `recording` is set but selectedReviewData/selectedTimeRange
// have not resolved yet, this falls through and the component returns
// undefined (renders nothing) — confirm that transient blank state is intended.
if (recording) {
  if (selectedReviewData && selectedTimeRange) {
    return (
      <RecordingView
        startCamera={selectedReviewData.camera}
        startTime={selectedReviewData.start_time}
        allCameras={selectedReviewData.allCameras}
        allPreviews={allPreviews}
        timeRange={selectedTimeRange}
        updateFilter={setSearchFilter}
      />
    );
  }
} else {
  return (
    <SearchView
      search={search}
      searchTerm={searchTerm}
      searchFilter={searchFilter}
      searchResults={searchResults}
      isLoading={(isLoadingInitialData || isLoadingMore) ?? true}
      setSearch={setSearch}
      setSimilaritySearch={(search) => setSearch(`similarity:${search.id}`)}
      setSearchFilter={setSearchFilter}
      onUpdateFilter={setSearchFilter}
      onOpenSearch={onOpenSearch}
      loadMore={loadMore}
      hasMore={!isReachingEnd}
    />
  );
}
}

View File

@@ -29,7 +29,6 @@ export type SearchResult = {
};
export type SearchFilter = {
query?: string;
cameras?: string[];
labels?: string[];
subLabels?: string[];
@@ -56,7 +55,7 @@ export type SearchQueryParams = {
};
export type SearchQuery = [string, SearchQueryParams] | null;
export type FilterType = Exclude<keyof SearchFilter, "query">;
export type FilterType = keyof SearchFilter;
export type SavedSearchQuery = {
name: string;

View File

@@ -285,11 +285,6 @@ export function endOfHourOrCurrentTime(timestamp: number) {
return Math.min(timestamp, now.getTime() / 1000);
}
/**
 * Returns the Unix timestamp (seconds) of local midnight at the start of the
 * given date's day.
 *
 * Operates on a copy so the caller's Date is not mutated — the previous
 * implementation clobbered the argument via setHours().
 */
export function getBeginningOfDayTimestamp(date: Date) {
  const startOfDay = new Date(date);
  startOfDay.setHours(0, 0, 0, 0);
  return startOfDay.getTime() / 1000;
}
export function getEndOfDayTimestamp(date: Date) {
date.setHours(23, 59, 59, 999);
return date.getTime() / 1000;

View File

@@ -33,6 +33,7 @@ type SearchViewProps = {
setSimilaritySearch: (search: SearchResult) => void;
setSearchFilter: (filter: SearchFilter) => void;
onUpdateFilter: (filter: SearchFilter) => void;
onOpenSearch: (item: SearchResult) => void;
loadMore: () => void;
hasMore: boolean;
};
@@ -281,6 +282,7 @@ export default function SearchView({
"w-full justify-between md:justify-start lg:justify-end",
)}
filter={searchFilter}
searchTerm={searchTerm}
onUpdateFilter={onUpdateFilter}
/>
)}