Compare commits

61 commits

| SHA1 |
|---|
| 3d1ebdcbd5 |
| 91c6618cab |
| 65d1bb6449 |
| 1ffd0d3897 |
| 2139d621b5 |
| d326846790 |
| 1a2ef37d95 |
| 384487af41 |
| 7bc0acc2f7 |
| fcb4a094e5 |
| 957613b2af |
| 5fa0bfe6d3 |
| e8d27d1f91 |
| 39ecffbf92 |
| d5fb957503 |
| ee81b5623e |
| e010206efe |
| 052634d020 |
| 44d9c3a654 |
| 59088fd10f |
| c6ef5160c1 |
| 11d8b304a3 |
| 52c1c9c327 |
| d91602acee |
| 2b12117df5 |
| 4fb2f89ac8 |
| c97457d22a |
| 6742363017 |
| 7c4f36716e |
| 304fc4d44b |
| 310af75c86 |
| bd6b2868d0 |
| d8cae0f597 |
| 27a743ca96 |
| c269b4c320 |
| d56b5dac28 |
| 15e01b67c9 |
| 65a178e8d3 |
| 81123f7aec |
| 4e2b52db56 |
| bb36b9ced6 |
| a90619bff7 |
| 1ac7b30fb7 |
| ac5bd15fc1 |
| b380f94297 |
| 173b7aa308 |
| c4727f19e1 |
| b8a74793ca |
| c1dede9369 |
| 0c4ea504d8 |
| b265b6b190 |
| d57a61b50f |
| 4fc9106c17 |
| 38e098ca31 |
| e7ad38d827 |
| a1ce9aacf2 |
| 322b847356 |
| 98338e4c7f |
| 171a89f37b |
| 8114b541a8 |
| c48396c5c6 |
2 .github/workflows/pull_request.yml (vendored)

@@ -6,7 +6,7 @@ on:
       - "docs/**"

 env:
-  DEFAULT_PYTHON: 3.9
+  DEFAULT_PYTHON: 3.11

 jobs:
   build_devcontainer:
@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
     object_detector.cleanup()
     print(f"{id} - Processed for {duration:.2f} seconds.")
     print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
-    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+    print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")


 ######
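The change above is whitespace-only inside the f-string; a standalone sanity check (not part of the PR, values made up) that the formatting renders as expected:

```python
# Standalone check of the print formatting above; frame_times is made up.
from statistics import mean

frame_times = [0.012, 0.011, 0.013]
print(f"Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
# -> Average frame processing time: 12.00ms
```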
@@ -2,7 +2,7 @@

 set -euxo pipefail

-hailo_version="4.19.0"
+hailo_version="4.20.0"

 if [[ "${TARGETARCH}" == "amd64" ]]; then
     arch="x86_64"
@@ -4,6 +4,7 @@
 sudo apt-get update
 sudo apt-get install -y build-essential cmake git wget

+hailo_version="4.20.0"
 arch=$(uname -m)

 if [[ $arch == "x86_64" ]]; then
@@ -13,7 +14,7 @@ else
 fi

 # Clone the HailoRT driver repository
-git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
+git clone --depth 1 --branch v${hailo_version} https://github.com/hailo-ai/hailort-drivers.git

 # Build and install the HailoRT driver
 cd hailort-drivers/linux/pcie
@@ -13,7 +13,7 @@ markupsafe == 2.1.*
 python-multipart == 0.0.12
 # General
 mypy == 1.6.1
-onvif_zeep == 0.2.12
+onvif-zeep-async == 3.1.*
 paho-mqtt == 2.1.*
 pandas == 2.2.*
 peewee == 3.17.*
20 docker/rockchip/COCO/coco_subset_20.txt (new file)

@@ -0,0 +1,20 @@
+./subset/000000005001.jpg
+./subset/000000038829.jpg
+./subset/000000052891.jpg
+./subset/000000075612.jpg
+./subset/000000098261.jpg
+./subset/000000181542.jpg
+./subset/000000215245.jpg
+./subset/000000277005.jpg
+./subset/000000288685.jpg
+./subset/000000301421.jpg
+./subset/000000334371.jpg
+./subset/000000348481.jpg
+./subset/000000373353.jpg
+./subset/000000397681.jpg
+./subset/000000414673.jpg
+./subset/000000419312.jpg
+./subset/000000465822.jpg
+./subset/000000475732.jpg
+./subset/000000559707.jpg
+./subset/000000574315.jpg
BIN docker/rockchip/COCO/subset/000000005001.jpg (new file, 207 KiB)
BIN docker/rockchip/COCO/subset/000000038829.jpg (new file, 209 KiB)
BIN docker/rockchip/COCO/subset/000000052891.jpg (new file, 150 KiB)
BIN docker/rockchip/COCO/subset/000000075612.jpg (new file, 102 KiB)
BIN docker/rockchip/COCO/subset/000000098261.jpg (new file, 14 KiB)
BIN docker/rockchip/COCO/subset/000000181542.jpg (new file, 201 KiB)
BIN docker/rockchip/COCO/subset/000000215245.jpg (new file, 233 KiB)
BIN docker/rockchip/COCO/subset/000000277005.jpg (new file, 242 KiB)
BIN docker/rockchip/COCO/subset/000000288685.jpg (new file, 230 KiB)
BIN docker/rockchip/COCO/subset/000000301421.jpg (new file, 80 KiB)
BIN docker/rockchip/COCO/subset/000000334371.jpg (new file, 136 KiB)
BIN docker/rockchip/COCO/subset/000000348481.jpg (new file, 113 KiB)
BIN docker/rockchip/COCO/subset/000000373353.jpg (new file, 281 KiB)
BIN docker/rockchip/COCO/subset/000000397681.jpg (new file, 272 KiB)
BIN docker/rockchip/COCO/subset/000000414673.jpg (new file, 152 KiB)
BIN docker/rockchip/COCO/subset/000000419312.jpg (new file, 166 KiB)
BIN docker/rockchip/COCO/subset/000000465822.jpg (new file, 109 KiB)
BIN docker/rockchip/COCO/subset/000000475732.jpg (new file, 103 KiB)
BIN docker/rockchip/COCO/subset/000000559707.jpg (new file, 203 KiB)
BIN docker/rockchip/COCO/subset/000000574315.jpg (new file, 110 KiB)
@@ -7,22 +7,26 @@ FROM wheels as rk-wheels
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
+RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
+RUN python3 -m pip config set global.break-system-packages true
 RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
+RUN rm -rf /rk-wheels/opencv_python-*

 FROM deps AS rk-frigate
 ARG TARGETARCH

 RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
-    pip3 install -U /deps/rk-wheels/*.whl --break-system-packages
+    pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
+COPY docker/rockchip/COCO /COCO
 COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py

-ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
+ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/

 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffmpeg /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffprobe /usr/lib/ffmpeg/6.0/bin/
 ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
82 docker/rockchip/conv2rknn.py (new file)

@@ -0,0 +1,82 @@
+import os
+
+import rknn
+import yaml
+from rknn.api import RKNN
+
+try:
+    with open(rknn.__path__[0] + "/VERSION") as file:
+        tk_version = file.read().strip()
+except FileNotFoundError:
+    pass
+
+try:
+    with open("/config/conv2rknn.yaml", "r") as config_file:
+        configuration = yaml.safe_load(config_file)
+except FileNotFoundError:
+    raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
+
+if configuration["config"] != None:
+    rknn_config = configuration["config"]
+else:
+    rknn_config = {}
+
+if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
+    raise Exception(
+        "Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
+    )
+
+if "soc" not in configuration:
+    try:
+        with open("/proc/device-tree/compatible") as file:
+            soc = file.read().split(",")[-1].strip("\x00")
+    except FileNotFoundError:
+        raise Exception("Make sure to run docker in privileged mode.")
+
+    configuration["soc"] = [
+        soc,
+    ]
+
+if "quantization" not in configuration:
+    configuration["quantization"] = False
+
+if "output_name" not in configuration:
+    configuration["output_name"] = "{input_basename}"
+
+for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
+    for soc in configuration["soc"]:
+        quant = "i8" if configuration["quantization"] else "fp16"
+
+        input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
+        input_basename = input_filename[: input_filename.rfind(".")]
+
+        output_filename = (
+            configuration["output_name"].format(
+                quant=quant,
+                input_basename=input_basename,
+                soc=soc,
+                tk_version=tk_version,
+            )
+            + ".rknn"
+        )
+        output_path = "/config/model_cache/rknn_cache/" + output_filename
+
+        rknn_config["target_platform"] = soc
+
+        rknn = RKNN(verbose=True)
+        rknn.config(**rknn_config)
+
+        if rknn.load_onnx(model=input_path) != 0:
+            raise Exception("Error loading model.")
+
+        if (
+            rknn.build(
+                do_quantization=configuration["quantization"],
+                dataset="/COCO/coco_subset_20.txt",
+            )
+            != 0
+        ):
+            raise Exception("Error building model.")
+
+        if rknn.export_rknn(output_path) != 0:
+            raise Exception("Error exporting rknn model.")
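Worth noting for reviewers: the `output_name` templating above is plain `str.format` substitution. A standalone sketch (values are illustrative, not from this PR) of how a fully specified template expands:

```python
# Illustration only: expanding the output_name template used by conv2rknn.py.
template = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"
output_filename = (
    template.format(
        quant="i8", input_basename="my_model", soc="rk3588", tk_version="2.3.0"
    )
    + ".rknn"
)
print(output_filename)  # frigate-i8-my_model-rk3588-v2.3.0.rknn
```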
@@ -1 +1,2 @@
-rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp311-cp311-linux_aarch64.whl
+rknn-toolkit2 == 2.3.0
+rknn-toolkit-lite2 == 2.3.0
@@ -3,19 +3,18 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive

-ARG TRT_BASE=nvcr.io/nvidia/tensorrt:24.10-py3
+ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3

 # Build TensorRT-specific library
 FROM ${TRT_BASE} AS trt-deps

 ARG COMPUTE_LEVEL

-# Need to wait for script to be adapted to newer version of tensorrt or perhaps decide that we want to remove the TRT detector in favor of using onnx runtime directly
-#RUN apt-get update \
-#    && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
-#    && rm -rf /var/lib/apt/lists/*
-#RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
-#    /tensorrt_libyolo.sh
+RUN apt-get update \
+    && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
+    && rm -rf /var/lib/apt/lists/*
+RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+    /tensorrt_libyolo.sh

 # Frigate w/ TensorRT Support as separate image
 FROM deps AS tensorrt-base
@@ -23,12 +22,9 @@ FROM deps AS tensorrt-base
 #Disable S6 Global timeout
 ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

-#COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
-#COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-
-COPY --from=trt-deps /usr/lib/x86_64-linux-gnu/libcudnn* /usr/local/cuda/lib64/
-COPY --from=trt-deps /usr/lib/x86_64-linux-gnu/libnv* /usr/local/cuda/lib64/
-
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 ENV YOLO_MODELS=""
@@ -1,10 +1,14 @@
 # NVidia TensorRT Support (amd64 only)
 --extra-index-url 'https://pypi.nvidia.com'
 numpy < 1.24; platform_machine == 'x86_64'
-tensorrt == 10.5.0; platform_machine == 'x86_64'
-cuda-python == 12.6.*; platform_machine == 'x86_64'
+tensorrt == 8.6.1.*; platform_machine == 'x86_64'
+cuda-python == 11.8.*; platform_machine == 'x86_64'
 cython == 3.0.*; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
+nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
+nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
+nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
+nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
@@ -67,14 +67,15 @@ ffmpeg:

 ### Annke C800

-This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback of in the browser is not working (yet), but the downloaded clip can be played locally.
+This camera is H.265 only. To be able to play clips on some devices (like MacOS or iPhone) the H.265 stream has to be adjusted using the `apple_compatibility` config.

 ```yaml
 cameras:
   annkec800: # <------ Name the camera
     ffmpeg:
+      apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
       output_args:
-        record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
+        record: preset-record-generic-audio-aac

     inputs:
       - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
@@ -156,7 +157,9 @@ cameras:

 #### Reolink Doorbell

-The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
+The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
+
+Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).

 ```yaml
 go2rtc:
@@ -175,6 +175,16 @@ For more information on the various values across different distributions, see h

 Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`

+#### Stats for SR-IOV devices
+
+When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
+
+```yaml
+telemetry:
+  stats:
+    sriov: True
+```
+
 ## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver

 VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
@@ -203,14 +203,13 @@ detectors:
   ov:
     type: openvino
     device: AUTO
-    model:
-      path: /openvino-model/ssdlite_mobilenet_v2.xml

 model:
   width: 300
   height: 300
   input_tensor: nhwc
   input_pixel_format: bgr
+  path: /openvino-model/ssdlite_mobilenet_v2.xml
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt

 record:
@@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi

 ### Audio Support

-MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
+MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.

 ```yaml
 go2rtc:
@@ -138,3 +138,13 @@ services:
 :::

 See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
+
+### Two way talk
+
+For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
+
+- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
+- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
+- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
+
+To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
@@ -144,7 +144,9 @@ detectors:

 #### SSDLite MobileNet v2

-An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
+An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
+
+Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:

 ```yaml
 detectors:
@@ -254,6 +256,7 @@ yolov4x-mish-640
 yolov7-tiny-288
 yolov7-tiny-416
 yolov7-640
+yolov7-416
 yolov7-320
 yolov7x-640
 yolov7x-320
@@ -282,6 +285,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type

 The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.

+Use the config below to work with generated TRT models:
+
 ```yaml
 detectors:
   tensorrt:
@@ -501,11 +506,12 @@ detectors:
   cpu1:
     type: cpu
     num_threads: 3
-    model:
-      path: "/custom_model.tflite"
   cpu2:
     type: cpu
     num_threads: 3
+
+model:
+  path: "/custom_model.tflite"
 ```

 When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@@ -544,7 +550,7 @@ Hardware accelerated object detection is supported on the following SoCs:
 - RK3576
 - RK3588

-This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.0.0.beta0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
+This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.

 ### Prerequisites
@@ -617,7 +623,41 @@ $ cat /sys/kernel/debug/rknpu/load

 :::

 - All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
-- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires a x86 machine). Note, that there is only post-processing for the supported models.
+- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see `rknn-toolkit2`. Note that there is only post-processing for the supported models.
+
+### Converting your own onnx model to rknn format
+
+To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
+
+- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
+- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
+- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.
+
+This is an example configuration file that you need to adjust to your specific onnx model:
+
+```yaml
+soc: ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
+quantization: false
+
+output_name: "{input_basename}"
+
+config:
+  mean_values: [[0, 0, 0]]
+  std_values: [[255, 255, 255]]
+  quant_img_rgb2bgr: true
+```
+
+Explanation of the parameters:
+
+- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
+- `quantization`: true: 8 bit integer (i8) quantization, false: 16 bit float (fp16). Default: false.
+- `output_name`: The output name of the model. The following variables are available:
+  - `quant`: "i8" or "fp16" depending on the config
+  - `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
+  - `soc`: the SoC this model was built for (e.g. "rk3588")
+  - `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
+  - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
+- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).

 ## Hailo-8l
@@ -632,8 +672,6 @@ detectors:
   hailo8l:
     type: hailo8l
     device: PCIe
-    model:
-      path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef

 model:
   width: 300
@@ -641,4 +679,5 @@ model:
   input_tensor: nhwc
   input_pixel_format: bgr
   model_type: ssd
+  path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
 ```
@@ -52,7 +52,7 @@ detectors:
   # Required: name of the detector
   detector_name:
     # Required: type of the detector
-    # Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
+    # Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
     # Additional detector types can also be plugged in.
     # Detectors may require additional configuration.
     # Refer to the Detectors configuration page for more information.
@@ -117,25 +117,27 @@ auth:
   hash_iterations: 600000

 # Optional: model modifications
+# NOTE: The default values are for the EdgeTPU detector.
+# Other detectors will require the model config to be set.
 model:
-  # Optional: path to the model (default: automatic based on detector)
+  # Required: path to the model (default: automatic based on detector)
   path: /edgetpu_model.tflite
-  # Optional: path to the labelmap (default: shown below)
+  # Required: path to the labelmap (default: shown below)
   labelmap_path: /labelmap.txt
   # Required: Object detection model input width (default: shown below)
   width: 320
   # Required: Object detection model input height (default: shown below)
   height: 320
-  # Optional: Object detection model input colorspace
+  # Required: Object detection model input colorspace
   # Valid values are rgb, bgr, or yuv. (default: shown below)
   input_pixel_format: rgb
-  # Optional: Object detection model input tensor format
+  # Required: Object detection model input tensor format
   # Valid values are nhwc or nchw (default: shown below)
   input_tensor: nhwc
-  # Optional: Object detection model type, currently only used with the OpenVINO detector
+  # Required: Object detection model type, currently only used with the OpenVINO detector
   # Valid values are ssd, yolox, yolonas (default: shown below)
   model_type: ssd
-  # Optional: Label name modifications. These are merged into the standard labelmap.
+  # Required: Label name modifications. These are merged into the standard labelmap.
   labelmap:
     2: vehicle
   # Optional: Map of object labels to their attribute labels (default: depends on model)
@@ -242,6 +244,8 @@ ffmpeg:
   # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
   # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
   retry_interval: 10
+  # Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
+  apple_compatibility: false

 # Optional: Detect configuration
 # NOTE: Can be overridden at the camera level
@@ -811,11 +815,13 @@ telemetry:
     - lo
   # Optional: Configure system stats
   stats:
-    # Enable AMD GPU stats (default: shown below)
+    # Optional: Enable AMD GPU stats (default: shown below)
    amd_gpu_stats: True
-    # Enable Intel GPU stats (default: shown below)
+    # Optional: Enable Intel GPU stats (default: shown below)
    intel_gpu_stats: True
-    # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
+    # Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
+    sriov: False
+    # Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
    # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
    network_bandwidth: False
  # Optional: Enable the latest version outbound check (default: shown below)
@@ -305,8 +305,15 @@ To install make sure you have the [community app plugin here](https://forums.unr

 ## Proxmox

-It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
+[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
+
+:::warning
+
+If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation; Frigate does not officially support running inside of an LXC.
+
+:::
+
+Suggestions include:
 - For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
   - `lxc.cgroup2.devices.allow: c 226:128 rwm`
   - `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`
@@ -3,7 +3,15 @@ id: recordings
 title: Troubleshooting Recordings
 ---

-### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
+## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
+
+You'll want to:
+
+- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
+- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
+- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
+
+## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...

 This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
@@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:

 On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set allocations memory and memory+swap to be the same and disable swapping by setting the following docker/podman run parameters:

+**Compose example**
 ```yaml
 version: "3.9"
 services:
@@ -54,6 +63,7 @@ services:
 ```

+**Run command example**
 ```
 --memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
 ```
@@ -84,9 +84,10 @@ def main() -> None:
         except Exception as traverse_error:
             print(f"Could not determine exact line number: {traverse_error}")

-        print(f"Line # : {line_number}")
-        print(f"Key : {' -> '.join(map(str, error_path))}")
-        print(f"Value : {error.get('input','-')}")
+        if current != full_config:
+            print(f"Line # : {line_number}")
+        print(f"Key : {' -> '.join(map(str, error_path))}")
+        print(f"Value : {error.get('input', '-')}")
         print(f"Message : {error.get('msg', error.get('type', 'Unknown'))}\n")

     print("*************************************************************")
@@ -142,6 +142,8 @@ def config(request: Request):
         mode="json", warnings="none", exclude_none=True
     )
     for stream_name, stream in go2rtc.get("streams", {}).items():
+        if stream is None:
+            continue
         if isinstance(stream, str):
             cleaned = clean_camera_user_pass(stream)
         else:
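A standalone illustration (not Frigate code) of the guard added above: `None`-valued go2rtc streams are now skipped before the string/list handling that follows:

```python
# Illustration only: skip None-valued streams before further processing.
streams = {"cam1": "rtsp://user:pass@host/stream", "cam2": None}

for stream_name, stream in streams.items():
    if stream is None:
        continue
    print(stream_name, "->", stream.split("://", 1)[0])  # cam1 -> rtsp
```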
@@ -2,6 +2,9 @@

 import logging
 import os
+import random
+import shutil
+import string

 from fastapi import APIRouter, Request, UploadFile
 from fastapi.responses import JSONResponse
@@ -21,8 +24,18 @@ def get_faces():
     face_dict: dict[str, list[str]] = {}

     for name in os.listdir(FACE_DIR):
+        face_dir = os.path.join(FACE_DIR, name)
+
+        if not os.path.isdir(face_dir):
+            continue
+
         face_dict[name] = []
-        for file in os.listdir(os.path.join(FACE_DIR, name)):
+
+        for file in sorted(
+            os.listdir(face_dir),
+            key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
+            reverse=True,
+        ):
             face_dict[name].append(file)

     return JSONResponse(status_code=200, content=face_dict)
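For reference, a standalone illustration (not Frigate code) of the sort introduced above, ordering directory entries by filesystem creation time, newest first:

```python
# Illustration only: list entries of a directory, newest created first.
import os

face_dir = "."  # any directory works for the illustration
newest_first = sorted(
    os.listdir(face_dir),
    key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
    reverse=True,
)
print(newest_first[:5])  # five most recently created entries
```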
@@ -30,16 +43,71 @@ def get_faces():

 @router.post("/faces/{name}")
 async def register_face(request: Request, name: str, file: UploadFile):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
     context: EmbeddingsContext = request.app.embeddings
-    context.register_face(name, await file.read())
+    result = context.register_face(name, await file.read())
     return JSONResponse(
-        status_code=200,
-        content={"success": True, "message": "Successfully registered face."},
+        status_code=200 if result.get("success", True) else 400,
+        content=result,
     )


+@router.post("/faces/train/{name}/classify")
+def train_face(request: Request, name: str, body: dict = None):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    json: dict[str, any] = body or {}
+    training_file = os.path.join(
+        FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
+    )
+
+    if not training_file or not os.path.isfile(training_file):
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": f"Invalid filename or no file exists: {training_file}",
+                }
+            ),
+            status_code=404,
+        )
+
+    rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
+    new_name = f"{name}-{rand_id}.webp"
+    new_file = os.path.join(FACE_DIR, f"{name}/{new_name}")
+    shutil.move(training_file, new_file)
+
+    context: EmbeddingsContext = request.app.embeddings
+    context.clear_face_classifier()
+
+    return JSONResponse(
+        content=(
+            {
+                "success": True,
+                "message": f"Successfully saved {training_file} as {new_name}.",
+            }
+        ),
+        status_code=200,
+    )
+
+
 @router.post("/faces/{name}/delete")
 def deregister_faces(request: Request, name: str, body: dict = None):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
     json: dict[str, any] = body or {}
     list_of_ids = json.get("ids", "")
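A hedged usage sketch for the endpoints above, using `requests`; the base URL, port, and file names are assumptions for illustration and not part of this PR:

```python
# Hypothetical client calls against the face endpoints above.
import requests

BASE = "http://frigate.local:5000/api"  # assumed host/port

# Register a new face image for "john" (handled by register_face).
with open("john.jpg", "rb") as f:
    print(requests.post(f"{BASE}/faces/john", files={"file": f}).json())

# Promote a file from FACE_DIR/train into john's library (train_face).
print(
    requests.post(
        f"{BASE}/faces/train/john/classify",
        json={"training_file": "john-candidate.webp"},
    ).json()
)
```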
@@ -34,10 +34,12 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     EXPORT_DIR,
+    FACE_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
     SHM_FRAMES_VAR,
 )
+from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.embeddings import EmbeddingsContext, manage_embeddings
 from frigate.events.audio import AudioProcessor
@@ -88,6 +90,9 @@ class FrigateApp:
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
         self.log_queue: Queue = mp.Queue()
         self.camera_metrics: dict[str, CameraMetrics] = {}
+        self.embeddings_metrics: DataProcessorMetrics | None = (
+            DataProcessorMetrics() if config.semantic_search.enabled else None
+        )
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
@@ -96,14 +101,19 @@ class FrigateApp:
         self.config = config

     def ensure_dirs(self) -> None:
-        for d in [
+        dirs = [
             CONFIG_DIR,
             RECORD_DIR,
             f"{CLIPS_DIR}/cache",
             CACHE_DIR,
             MODEL_CACHE_DIR,
             EXPORT_DIR,
-        ]:
+        ]
+
+        if self.config.face_recognition.enabled:
+            dirs.append(FACE_DIR)
+
+        for d in dirs:
             if not os.path.exists(d) and not os.path.islink(d):
                 logger.info(f"Creating directory: {d}")
                 os.makedirs(d)
@@ -229,7 +239,10 @@ class FrigateApp:
         embedding_process = util.Process(
             target=manage_embeddings,
             name="embeddings_manager",
-            args=(self.config,),
+            args=(
+                self.config,
+                self.embeddings_metrics,
+            ),
         )
         embedding_process.daemon = True
         self.embedding_process = embedding_process
@@ -491,7 +504,11 @@ class FrigateApp:
         self.stats_emitter = StatsEmitter(
             self.config,
             stats_init(
-                self.config, self.camera_metrics, self.detectors, self.processes
+                self.config,
+                self.camera_metrics,
+                self.embeddings_metrics,
+                self.detectors,
+                self.processes,
             ),
             self.stop_event,
         )
130 frigate/camera/activity_manager.py (new file)

@@ -0,0 +1,130 @@
+"""Manage camera activity and updating listeners."""
+
+from collections import Counter
+from typing import Callable
+
+from frigate.config.config import FrigateConfig
+
+
+class CameraActivityManager:
+    def __init__(
+        self, config: FrigateConfig, publish: Callable[[str, any], None]
+    ) -> None:
+        self.config = config
+        self.publish = publish
+        self.last_camera_activity: dict[str, dict[str, any]] = {}
+        self.camera_all_object_counts: dict[str, Counter] = {}
+        self.camera_active_object_counts: dict[str, Counter] = {}
+        self.zone_all_object_counts: dict[str, Counter] = {}
+        self.zone_active_object_counts: dict[str, Counter] = {}
+        self.all_zone_labels: dict[str, set[str]] = {}
+
+        for camera_config in config.cameras.values():
+            if not camera_config.enabled:
+                continue
+
+            self.last_camera_activity[camera_config.name] = {}
+            self.camera_all_object_counts[camera_config.name] = Counter()
+            self.camera_active_object_counts[camera_config.name] = Counter()
+
+            for zone, zone_config in camera_config.zones.items():
+                if zone not in self.all_zone_labels:
+                    self.zone_all_object_counts[zone] = Counter()
+                    self.zone_active_object_counts[zone] = Counter()
+                    self.all_zone_labels[zone] = set()
+
+                self.all_zone_labels[zone].update(zone_config.objects)
+
+    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
+        all_objects: list[dict[str, any]] = []
+
+        for camera in new_activity.keys():
+            new_objects = new_activity[camera].get("objects", [])
+            all_objects.extend(new_objects)
+
+            if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
+                self.compare_camera_activity(camera, new_objects)
+
+        # run through every zone, getting a count of objects in that zone right now
+        for zone, labels in self.all_zone_labels.items():
+            all_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"]
+            )
+            active_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"] and not obj["stationary"]
+            )
+            any_changed = False
+
+            # run through each object and check what topics need to be updated for this zone
+            for label in labels:
+                new_count = all_zone_objects[label]
+                new_active_count = active_zone_objects[label]
+
+                if (
+                    new_count != self.zone_all_object_counts[zone][label]
+                    or label not in self.zone_all_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}", new_count)
+                    self.zone_all_object_counts[zone][label] = new_count
+
+                if (
+                    new_active_count != self.zone_active_object_counts[zone][label]
+                    or label not in self.zone_active_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}/active", new_active_count)
+                    self.zone_active_object_counts[zone][label] = new_active_count
+
+            if any_changed:
+                self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
+                self.publish(
+                    f"{zone}/all/active", sum(list(active_zone_objects.values()))
+                )
+
+        self.last_camera_activity = new_activity
+
+    def compare_camera_activity(
+        self, camera: str, new_activity: dict[str, any]
+    ) -> None:
+        all_objects = Counter(
+            obj["label"].replace("-verified", "") for obj in new_activity
+        )
+        active_objects = Counter(
+            obj["label"].replace("-verified", "")
+            for obj in new_activity
+            if not obj["stationary"]
+        )
+        any_changed = False
+
+        # run through each object and check what topics need to be updated
+        for label in self.config.cameras[camera].objects.track:
+            if label in self.config.model.all_attributes:
+                continue
+
+            new_count = all_objects[label]
+            new_active_count = active_objects[label]
+
+            if (
+                new_count != self.camera_all_object_counts[camera][label]
+                or label not in self.camera_all_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}", new_count)
+                self.camera_all_object_counts[camera][label] = new_count
+
+            if (
+                new_active_count != self.camera_active_object_counts[camera][label]
+                or label not in self.camera_active_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}/active", new_active_count)
+                self.camera_active_object_counts[camera][label] = new_active_count
+
+        if any_changed:
+            self.publish(f"{camera}/all", sum(list(all_objects.values())))
+            self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
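A standalone illustration (not Frigate code) of the counting scheme `CameraActivityManager` uses: `-verified` suffixes fold into the base label, and stationary objects are excluded from the active counts:

```python
# Illustration only: the Counter-based counting used above, on made-up data.
from collections import Counter

objects = [
    {"label": "person", "stationary": False, "current_zones": ["porch"]},
    {"label": "person-verified", "stationary": True, "current_zones": ["porch"]},
    {"label": "car", "stationary": True, "current_zones": []},
]

all_counts = Counter(o["label"].replace("-verified", "") for o in objects)
active_counts = Counter(
    o["label"].replace("-verified", "") for o in objects if not o["stationary"]
)

print(all_counts)     # Counter({'person': 2, 'car': 1})
print(active_counts)  # Counter({'person': 1})
```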
@@ -7,6 +7,7 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable, Optional

 from frigate.camera import PTZMetrics
+from frigate.camera.activity_manager import CameraActivityManager
 from frigate.comms.config_updater import ConfigPublisher
 from frigate.config import BirdseyeModeEnum, FrigateConfig
 from frigate.const import (
@@ -64,7 +65,7 @@ class Dispatcher:
         self.onvif = onvif
         self.ptz_metrics = ptz_metrics
         self.comms = communicators
-        self.camera_activity = {}
+        self.camera_activity = CameraActivityManager(config, self.publish)
         self.model_state = {}
         self.embeddings_reindex = {}
@@ -130,7 +131,7 @@ class Dispatcher:
             ).execute()

         def handle_update_camera_activity():
-            self.camera_activity = payload
+            self.camera_activity.update_activity(payload)

         def handle_update_event_description():
             event: Event = Event.get(Event.id == payload["id"])
@@ -171,7 +172,7 @@ class Dispatcher:
             )

         def handle_on_connect():
-            camera_status = self.camera_activity.copy()
+            camera_status = self.camera_activity.last_camera_activity.copy()

             for camera in camera_status.keys():
                 camera_status[camera]["config"] = {
@@ -9,6 +9,7 @@ SOCKET_REP_REQ = "ipc:///tmp/cache/embeddings"


 class EmbeddingsRequestEnum(Enum):
+    clear_face_classifier = "clear_face_classifier"
     embed_description = "embed_description"
     embed_thumbnail = "embed_thumbnail"
     generate_search = "generate_search"
@@ -151,7 +151,7 @@ class WebPushClient(Communicator):  # type: ignore[misc]
         camera: str = payload["after"]["camera"]
         title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
         message = f"Detected on {camera.replace('_', ' ').title()}"
-        image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
+        image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"

         # if event is ongoing open to live view otherwise open to recordings view
         direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"
@@ -3,13 +3,13 @@ from frigate.detectors import DetectorConfig, ModelConfig  # noqa: F401
 from .auth import *  # noqa: F403
 from .camera import *  # noqa: F403
 from .camera_group import *  # noqa: F403
+from .classification import *  # noqa: F403
 from .config import *  # noqa: F403
 from .database import *  # noqa: F403
 from .logger import *  # noqa: F403
 from .mqtt import *  # noqa: F403
 from .notification import *  # noqa: F403
 from .proxy import *  # noqa: F403
-from .semantic_search import *  # noqa: F403
 from .telemetry import *  # noqa: F403
 from .tls import *  # noqa: F403
 from .ui import *  # noqa: F403
@@ -167,7 +167,7 @@ class CameraConfig(FrigateBaseModel):
         record_args = get_ffmpeg_arg_list(
             parse_preset_output_record(
                 self.ffmpeg.output_args.record,
-                self.ffmpeg.output_args._force_record_hvc1,
+                self.ffmpeg.apple_compatibility,
             )
             or self.ffmpeg.output_args.record
         )
@@ -2,7 +2,7 @@ import shutil
 from enum import Enum
 from typing import Union

-from pydantic import Field, PrivateAttr, field_validator
+from pydantic import Field, field_validator

 from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS

@@ -42,7 +42,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
         default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
         title="Record role FFmpeg output arguments.",
     )
-    _force_record_hvc1: bool = PrivateAttr(default=False)


 class FfmpegConfig(FrigateBaseModel):
@@ -64,6 +63,10 @@ class FfmpegConfig(FrigateBaseModel):
         default=10.0,
         title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
     )
+    apple_compatibility: bool = Field(
+        default=False,
+        title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
+    )

     @property
     def ffmpeg_path(self) -> str:
@@ -85,7 +85,7 @@ class ZoneConfig(BaseModel):
         if explicit:
             self.coordinates = ",".join(
                 [
-                    f'{round(int(p.split(",")[0]) / frame_shape[1], 3)},{round(int(p.split(",")[1]) / frame_shape[0], 3)}'
+                    f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
                     for p in coordinates
                 ]
             )
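A standalone illustration (not Frigate code) of the normalization this f-string performs: pixel `x,y` pairs divided by frame width/height and rounded to 3 decimals:

```python
# Illustration only: normalize pixel coordinates against the frame shape.
frame_shape = (1080, 1920)  # (height, width)
coordinates = ["960,540", "1920,1080"]

normalized = ",".join(
    f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
    for p in coordinates
)
print(normalized)  # 0.5,0.5,1.0,1.0
```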
@@ -11,6 +11,22 @@ __all__ = [
 ]


+class BirdClassificationConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable bird classification.")
+    threshold: float = Field(
+        default=0.9,
+        title="Minimum classification score required to be considered a match.",
+        gt=0.0,
+        le=1.0,
+    )
+
+
+class ClassificationConfig(FrigateBaseModel):
+    bird: BirdClassificationConfig = Field(
+        default_factory=BirdClassificationConfig, title="Bird classification config."
+    )
+
+
 class SemanticSearchConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable semantic search.")
     reindex: Optional[bool] = Field(
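A quick sketch of the resulting defaults, assuming the classes above are importable; pydantic coerces the nested dict into `BirdClassificationConfig`:

```python
# Illustration only: default and overridden classification config values.
cfg = ClassificationConfig()
print(cfg.bird.enabled, cfg.bird.threshold)  # False 0.9

cfg = ClassificationConfig(bird={"enabled": True, "threshold": 0.95})
print(cfg.bird.enabled, cfg.bird.threshold)  # True 0.95
```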
@@ -51,17 +51,18 @@ from .camera.review import ReviewConfig
 from .camera.snapshots import SnapshotsConfig
 from .camera.timestamp import TimestampStyleConfig
 from .camera_group import CameraGroupConfig
+from .classification import (
+    ClassificationConfig,
+    FaceRecognitionConfig,
+    LicensePlateRecognitionConfig,
+    SemanticSearchConfig,
+)
 from .database import DatabaseConfig
 from .env import EnvVars
 from .logger import LoggerConfig
 from .mqtt import MqttConfig
 from .notification import NotificationConfig
 from .proxy import ProxyConfig
-from .semantic_search import (
-    FaceRecognitionConfig,
-    LicensePlateRecognitionConfig,
-    SemanticSearchConfig,
-)
 from .telemetry import TelemetryConfig
 from .tls import TlsConfig
 from .ui import UIConfig
@@ -331,6 +332,9 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=TelemetryConfig, title="Telemetry configuration."
     )
     tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
+    classification: ClassificationConfig = Field(
+        default_factory=ClassificationConfig, title="Object classification config."
+    )
     semantic_search: SemanticSearchConfig = Field(
         default_factory=SemanticSearchConfig, title="Semantic search configuration."
     )
@@ -458,13 +462,12 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args

         for input in camera_config.ffmpeg.inputs:
-            need_record_fourcc = False and "record" in input.roles
             need_detect_dimensions = "detect" in input.roles and (
                 camera_config.detect.height is None
                 or camera_config.detect.width is None
             )

-            if need_detect_dimensions or need_record_fourcc:
+            if need_detect_dimensions:
                 stream_info = {"width": 0, "height": 0, "fourcc": None}
                 try:
                     stream_info = stream_info_retriever.get_stream_info(
@@ -488,14 +491,6 @@ class FrigateConfig(FrigateBaseModel):
                     else DEFAULT_DETECT_DIMENSIONS["height"]
                 )

-            if need_record_fourcc:
-                # Apple only supports HEVC if it is hvc1 (vs. hev1)
-                camera_config.ffmpeg.output_args._force_record_hvc1 = (
-                    stream_info["fourcc"] == "hevc"
-                    if stream_info.get("hevc")
-                    else False
-                )
-
         # Warn if detect fps > 10
         if camera_config.detect.fps > 10:
             logger.warning(
@@ -610,35 +605,27 @@ class FrigateConfig(FrigateBaseModel):
                 if isinstance(detector, dict)
                 else detector.model_dump(warnings="none")
             )
-            detector_config: DetectorConfig = adapter.validate_python(model_dict)
-            if detector_config.model is None:
-                detector_config.model = self.model.model_copy()
-            else:
-                path = detector_config.model.path
-                detector_config.model = self.model.model_copy()
-                detector_config.model.path = path
-
-                if "path" not in model_dict or len(model_dict.keys()) > 1:
-                    logger.warning(
-                        "Customizing more than a detector model path is unsupported."
-                    )
-
-            merged_model = deep_merge(
-                detector_config.model.model_dump(exclude_unset=True, warnings="none"),
-                self.model.model_dump(exclude_unset=True, warnings="none"),
-            )
-
-            if "path" not in merged_model:
+            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+
+            # users should not set model themselves
+            if detector_config.model:
+                detector_config.model = None
+
+            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
+
+            if detector_config.model_path:
+                model_config["path"] = detector_config.model_path
+
+            if "path" not in model_config:
                 if detector_config.type == "cpu":
-                    merged_model["path"] = "/cpu_model.tflite"
+                    model_config["path"] = "/cpu_model.tflite"
                 elif detector_config.type == "edgetpu":
-                    merged_model["path"] = "/edgetpu_model.tflite"
-
-            detector_config.model = ModelConfig.model_validate(merged_model)
-            detector_config.model.check_and_load_plus_model(
-                self.plus_api, detector_config.type
-            )
-            detector_config.model.compute_model_hash()
+                    model_config["path"] = "/edgetpu_model.tflite"
+
+            model = ModelConfig.model_validate(model_config)
+            model.check_and_load_plus_model(self.plus_api, detector_config.type)
+            model.compute_model_hash()
+            detector_config.model = model
             self.detectors[key] = detector_config

         verify_semantic_search_dependent_configs(self)
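A standalone sketch (not Frigate code) of the path precedence the new logic implements: a detector-level `model_path` wins over the global model config, and known detector types fall back to built-in defaults:

```python
# Illustration only: resolve a model path the way the hunk above does.
def resolve_model_path(
    global_model: dict, model_path: str | None, detector_type: str
) -> str | None:
    config = dict(global_model)
    if model_path:
        config["path"] = model_path
    if "path" not in config:
        defaults = {"cpu": "/cpu_model.tflite", "edgetpu": "/edgetpu_model.tflite"}
        config["path"] = defaults.get(detector_type)
    return config["path"]

print(resolve_model_path({}, None, "cpu"))                            # /cpu_model.tflite
print(resolve_model_path({"path": "/m.tflite"}, None, "cpu"))         # /m.tflite
print(resolve_model_path({"path": "/m.tflite"}, "/d.tflite", "cpu"))  # /d.tflite
```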
@@ -29,6 +29,7 @@ class LoggerConfig(FrigateBaseModel):
         logging.getLogger().setLevel(self.default.value.upper())

         log_levels = {
+            "httpx": LogLevel.error,
             "werkzeug": LogLevel.error,
             "ws4py": LogLevel.error,
             **self.logs,
@@ -11,6 +11,9 @@ class StatsConfig(FrigateBaseModel):
     network_bandwidth: bool = Field(
         default=False, title="Enable network bandwidth for ffmpeg processes."
     )
+    sriov: bool = Field(
+        default=False, title="Treat device as SR-IOV to support GPU stats."
+    )


 class TelemetryConfig(FrigateBaseModel):
@@ -65,6 +65,7 @@ INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
 FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
 FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
 FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
+FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]

 # Regex constants
43
frigate/data_processing/post/api.py
Normal file
@@ -0,0 +1,43 @@
"""Local or remote processors to handle post processing."""

import logging
from abc import ABC, abstractmethod

from frigate.config import FrigateConfig

from ..types import DataProcessorMetrics, PostProcessDataEnum

logger = logging.getLogger(__name__)


class PostProcessorApi(ABC):
    @abstractmethod
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
        self.config = config
        self.metrics = metrics
        pass

    @abstractmethod
    def process_data(
        self, data: dict[str, any], data_type: PostProcessDataEnum
    ) -> None:
        """Processes the data of data type.
        Args:
            data (dict): containing data about the input.
            data_type (enum): Describing the data that is being processed.

        Returns:
            None.
        """
        pass

    @abstractmethod
    def handle_request(self, request_data: dict[str, any]) -> dict[str, any] | None:
        """Handle metadata requests.
        Args:
            request_data (dict): containing data about requested change to process.

        Returns:
            None if request was not handled, otherwise return response.
        """
        pass

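For orientation, a minimal concrete implementation of this interface could look like the sketch below. The `LoggingPostProcessor` class and its behavior are hypothetical, not part of this PR; only the base-class contract is taken from the file above.

# Hypothetical example only -- not part of this PR.
from frigate.config import FrigateConfig
from frigate.data_processing.post.api import PostProcessorApi
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum


class LoggingPostProcessor(PostProcessorApi):
    """Toy processor that just logs whatever it is handed."""

    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
        super().__init__(config, metrics)

    def process_data(self, data: dict, data_type: PostProcessDataEnum) -> None:
        # A real processor would branch on the data type
        # (recording / review / tracked_object) here.
        print(f"post-processing {data_type.value}: {data.get('id')}")

    def handle_request(self, request_data: dict) -> dict | None:
        # Returning None signals the request was not handled.
        return None
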
57
frigate/data_processing/real_time/api.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Local only processors for handling real time object processing."""
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
import numpy as np
|
||||
|
||||
from frigate.config import FrigateConfig
|
||||
|
||||
from ..types import DataProcessorMetrics
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RealTimeProcessorApi(ABC):
|
||||
@abstractmethod
|
||||
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
|
||||
self.config = config
|
||||
self.metrics = metrics
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
|
||||
"""Processes the frame with object data.
|
||||
Args:
|
||||
obj_data (dict): containing data about focused object in frame.
|
||||
frame (ndarray): full yuv frame.
|
||||
|
||||
Returns:
|
||||
None.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def handle_request(
|
||||
self, topic: str, request_data: dict[str, any]
|
||||
) -> dict[str, any] | None:
|
||||
"""Handle metadata requests.
|
||||
Args:
|
||||
topic (str): topic that dictates what work is requested.
|
||||
request_data (dict): containing data about requested change to process.
|
||||
|
||||
Returns:
|
||||
None if request was not handled, otherwise return response.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def expire_object(self, object_id: str) -> None:
|
||||
"""Handle objects that are no longer detected.
|
||||
Args:
|
||||
object_id (str): id of object that is no longer detected.
|
||||
|
||||
Returns:
|
||||
None.
|
||||
"""
|
||||
pass
|
||||
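The payoff of this interface shows up in the embeddings maintainer refactor further down: the maintainer holds a list of processors and drives them all uniformly. A sketch of that dispatch pattern (function names illustrative; the loop bodies mirror the maintainer hunks below):

# Illustrative dispatch pattern (mirrors the maintainer changes further below).
from frigate.data_processing.real_time.api import RealTimeProcessorApi

processors: list[RealTimeProcessorApi] = []  # e.g. FaceProcessor, BirdProcessor

def on_frame(obj_data: dict, frame) -> None:
    for processor in processors:
        processor.process_frame(obj_data, frame)

def on_request(topic: str, request_data: dict):
    # first processor to return a non-None response wins
    for processor in processors:
        resp = processor.handle_request(topic, request_data)
        if resp is not None:
            return resp
    return None

def on_object_end(object_id: str) -> None:
    for processor in processors:
        processor.expire_object(object_id)
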
154
frigate/data_processing/real_time/bird_processor.py
Normal file
@@ -0,0 +1,154 @@
"""Handle processing images to classify birds."""

import logging
import os

import cv2
import numpy as np
import requests

from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.object import calculate_region

from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi

try:
    from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
    from tensorflow.lite.python.interpreter import Interpreter

logger = logging.getLogger(__name__)


class BirdProcessor(RealTimeProcessorApi):
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
        super().__init__(config, metrics)
        self.interpreter: Interpreter = None
        self.tensor_input_details: dict[str, any] = None
        self.tensor_output_details: dict[str, any] = None
        self.detected_birds: dict[str, float] = {}
        self.labelmap: dict[int, str] = {}

        download_path = os.path.join(MODEL_CACHE_DIR, "bird")
        self.model_files = {
            "bird.tflite": "https://raw.githubusercontent.com/google-coral/test_data/master/mobilenet_v2_1.0_224_inat_bird_quant.tflite",
            "birdmap.txt": "https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt",
        }

        if not all(
            os.path.exists(os.path.join(download_path, n))
            for n in self.model_files.keys()
        ):
            # conditionally import ModelDownloader
            from frigate.util.downloader import ModelDownloader

            self.downloader = ModelDownloader(
                model_name="bird",
                download_path=download_path,
                file_names=self.model_files.keys(),
                download_func=self.__download_models,
                complete_func=self.__build_detector,
            )
            self.downloader.ensure_model_files()
        else:
            self.__build_detector()

    def __download_models(self, path: str) -> None:
        try:
            file_name = os.path.basename(path)

            # conditionally import ModelDownloader
            from frigate.util.downloader import ModelDownloader

            ModelDownloader.download_from_url(self.model_files[file_name], path)
        except Exception as e:
            logger.error(f"Failed to download {path}: {e}")

    def __build_detector(self) -> None:
        self.interpreter = Interpreter(
            model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
            num_threads=2,
        )
        self.interpreter.allocate_tensors()
        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

        i = 0

        with open(os.path.join(MODEL_CACHE_DIR, "bird/birdmap.txt")) as f:
            line = f.readline()
            while line:
                start = line.find("(")
                end = line.find(")")
                self.labelmap[i] = line[start + 1 : end]
                i += 1
                line = f.readline()

    def process_frame(self, obj_data, frame):
        if obj_data["label"] != "bird":
            return

        x, y, x2, y2 = calculate_region(
            frame.shape,
            obj_data["box"][0],
            obj_data["box"][1],
            obj_data["box"][2],
            obj_data["box"][3],
            224,
            1.0,
        )

        rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
        input = rgb[
            y:y2,
            x:x2,
        ]

        cv2.imwrite("/media/frigate/test_class.png", input)

        input = np.expand_dims(input, axis=0)

        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
        self.interpreter.invoke()
        res: np.ndarray = self.interpreter.get_tensor(
            self.tensor_output_details[0]["index"]
        )[0]
        probs = res / res.sum(axis=0)
        best_id = np.argmax(probs)

        if best_id == 964:
            logger.debug("No bird classification was detected.")
            return

        score = round(probs[best_id], 2)

        if score < self.config.classification.bird.threshold:
            logger.debug(f"Score {score} is not above required threshold")
            return

        previous_score = self.detected_birds.get(obj_data["id"], 0.0)

        if score <= previous_score:
            logger.debug(f"Score {score} is worse than previous score {previous_score}")
            return

        resp = requests.post(
            f"{FRIGATE_LOCALHOST}/api/events/{obj_data['id']}/sub_label",
            json={
                "camera": obj_data.get("camera"),
                "subLabel": self.labelmap[best_id],
                "subLabelScore": score,
            },
        )

        if resp.status_code == 200:
            self.detected_birds[obj_data["id"]] = score

    def handle_request(self, topic, request_data):
        return None

    def expire_object(self, object_id):
        if object_id in self.detected_birds:
            self.detected_birds.pop(object_id)

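The labelmap parse in `__build_detector` keys on the parenthesized common name in each line of the Coral iNat bird labels file. A quick sketch of exactly what that slice does (the sample line is hypothetical):

# Illustrative only; the sample label line below is hypothetical.
line = "123 Cyanocitta cristata (Blue Jay)\n"
start = line.find("(")
end = line.find(")")
assert line[start + 1 : end] == "Blue Jay"
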
406
frigate/data_processing/real_time/face_processor.py
Normal file
@@ -0,0 +1,406 @@
"""Handle processing images for face detection and recognition."""

import base64
import datetime
import logging
import os
import random
import string
from typing import Optional

import cv2
import numpy as np
import requests

from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, FRIGATE_LOCALHOST, MODEL_CACHE_DIR
from frigate.util.image import area

from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi

logger = logging.getLogger(__name__)


MIN_MATCHING_FACES = 2


class FaceProcessor(RealTimeProcessorApi):
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
        super().__init__(config, metrics)
        self.face_config = config.face_recognition
        self.face_detector: cv2.FaceDetectorYN = None
        self.landmark_detector: cv2.face.FacemarkLBF = None
        self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
        self.requires_face_detection = "face" not in self.config.objects.all_objects
        self.detected_faces: dict[str, float] = {}

        download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
        self.model_files = {
            "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
            "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
        }

        if not all(
            os.path.exists(os.path.join(download_path, n))
            for n in self.model_files.keys()
        ):
            # conditionally import ModelDownloader
            from frigate.util.downloader import ModelDownloader

            self.downloader = ModelDownloader(
                model_name="facedet",
                download_path=download_path,
                file_names=self.model_files.keys(),
                download_func=self.__download_models,
                complete_func=self.__build_detector,
            )
            self.downloader.ensure_model_files()
        else:
            self.__build_detector()

        self.label_map: dict[int, str] = {}
        self.__build_classifier()

    def __download_models(self, path: str) -> None:
        try:
            file_name = os.path.basename(path)
            # conditionally import ModelDownloader
            from frigate.util.downloader import ModelDownloader

            ModelDownloader.download_from_url(self.model_files[file_name], path)
        except Exception as e:
            logger.error(f"Failed to download {path}: {e}")

    def __build_detector(self) -> None:
        self.face_detector = cv2.FaceDetectorYN.create(
            "/config/model_cache/facedet/facedet.onnx",
            config="",
            input_size=(320, 320),
            score_threshold=0.8,
            nms_threshold=0.3,
        )
        self.landmark_detector = cv2.face.createFacemarkLBF()
        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")

    def __build_classifier(self) -> None:
        if not self.landmark_detector:
            return None

        labels = []
        faces = []

        dir = "/media/frigate/clips/faces"
        for idx, name in enumerate(os.listdir(dir)):
            if name == "train":
                continue

            face_folder = os.path.join(dir, name)

            if not os.path.isdir(face_folder):
                continue

            self.label_map[idx] = name
            for image in os.listdir(face_folder):
                img = cv2.imread(os.path.join(face_folder, image))

                if img is None:
                    continue

                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img = self.__align_face(img, img.shape[1], img.shape[0])
                faces.append(img)
                labels.append(idx)

        self.recognizer: cv2.face.LBPHFaceRecognizer = (
            cv2.face.LBPHFaceRecognizer_create(
                radius=2, threshold=(1 - self.face_config.min_score) * 1000
            )
        )
        self.recognizer.train(faces, np.array(labels))

    def __align_face(
        self,
        image: np.ndarray,
        output_width: int,
        output_height: int,
    ) -> np.ndarray:
        _, lands = self.landmark_detector.fit(
            image, np.array([(0, 0, image.shape[1], image.shape[0])])
        )
        landmarks: np.ndarray = lands[0][0]

        # get landmarks for eyes
        leftEyePts = landmarks[42:48]
        rightEyePts = landmarks[36:42]

        # compute the center of mass for each eye
        leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
        rightEyeCenter = rightEyePts.mean(axis=0).astype("int")

        # compute the angle between the eye centroids
        dY = rightEyeCenter[1] - leftEyeCenter[1]
        dX = rightEyeCenter[0] - leftEyeCenter[0]
        angle = np.degrees(np.arctan2(dY, dX)) - 180

        # compute the desired right eye x-coordinate based on the
        # desired x-coordinate of the left eye
        desiredRightEyeX = 1.0 - 0.35

        # determine the scale of the new resulting image by taking
        # the ratio of the distance between eyes in the *current*
        # image to the ratio of distance between eyes in the
        # *desired* image
        dist = np.sqrt((dX**2) + (dY**2))
        desiredDist = desiredRightEyeX - 0.35
        desiredDist *= output_width
        scale = desiredDist / dist

        # compute center (x, y)-coordinates (i.e., the median point)
        # between the two eyes in the input image
        # grab the rotation matrix for rotating and scaling the face
        eyesCenter = (
            int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
            int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
        )
        M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)

        # update the translation component of the matrix
        tX = output_width * 0.5
        tY = output_height * 0.35
        M[0, 2] += tX - eyesCenter[0]
        M[1, 2] += tY - eyesCenter[1]

        # apply the affine transformation
        return cv2.warpAffine(
            image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
        )

    def __clear_classifier(self) -> None:
        self.face_recognizer = None
        self.label_map = {}

    def __detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
        """Detect faces in input image."""
        if not self.face_detector:
            return None

        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
        faces = self.face_detector.detect(input)

        if faces is None or faces[1] is None:
            return None

        face = None

        for _, potential_face in enumerate(faces[1]):
            raw_bbox = potential_face[0:4].astype(np.uint16)
            x: int = max(raw_bbox[0], 0)
            y: int = max(raw_bbox[1], 0)
            w: int = raw_bbox[2]
            h: int = raw_bbox[3]
            bbox = (x, y, x + w, y + h)

            if face is None or area(bbox) > area(face):
                face = bbox

        return face

    def __classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
        if not self.landmark_detector:
            return None

        if not self.label_map:
            self.__build_classifier()

        img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        img = self.__align_face(img, img.shape[1], img.shape[0])
        index, distance = self.recognizer.predict(img)

        if index == -1:
            return None

        score = 1.0 - (distance / 1000)
        return self.label_map[index], round(score, 2)

    def __update_metrics(self, duration: float) -> None:
        self.metrics.face_rec_fps.value = (
            self.metrics.face_rec_fps.value * 9 + duration
        ) / 10

    def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
        """Look for faces in image."""
        start = datetime.datetime.now().timestamp()
        id = obj_data["id"]

        # don't run for non person objects
        if obj_data.get("label") != "person":
            logger.debug("Not processing face for non person object.")
            return

        # don't overwrite sub label for objects that have a sub label
        # that is not a face
        if obj_data.get("sub_label") and id not in self.detected_faces:
            logger.debug(
                f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
            )
            return

        face: Optional[dict[str, any]] = None

        if self.requires_face_detection:
            logger.debug("Running manual face detection.")
            person_box = obj_data.get("box")

            if not person_box:
                return

            rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
            left, top, right, bottom = person_box
            person = rgb[top:bottom, left:right]
            face_box = self.__detect_face(person)

            if not face_box:
                logger.debug("Detected no faces for person object.")
                return

            face_frame = person[
                max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
                max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
            ]
            face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
        else:
            # don't run for object without attributes
            if not obj_data.get("current_attributes"):
                logger.debug("No attributes to parse.")
                return

            attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
            for attr in attributes:
                if attr.get("label") != "face":
                    continue

                if face is None or attr.get("score", 0.0) > face.get("score", 0.0):
                    face = attr

            # no faces detected in this frame
            if not face:
                return

            face_box = face.get("box")

            # check that face is valid
            if not face_box or area(face_box) < self.config.face_recognition.min_area:
                logger.debug(f"Invalid face box {face}")
                return

            face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)

            face_frame = face_frame[
                max(0, face_box[1]) : min(frame.shape[0], face_box[3]),
                max(0, face_box[0]) : min(frame.shape[1], face_box[2]),
            ]

        res = self.__classify_face(face_frame)

        if not res:
            return

        sub_label, score = res

        # calculate the overall face score as the probability * area of face
        # this will help to reduce false positives from small side-angle faces
        # if a large front-on face image may have scored slightly lower but
        # is more likely to be accurate due to the larger face area
        face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2)

        logger.debug(
            f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
        )

        if self.config.face_recognition.save_attempts:
            # write face to library
            folder = os.path.join(FACE_DIR, "train")
            file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
            os.makedirs(folder, exist_ok=True)
            cv2.imwrite(file, face_frame)

        if score < self.config.face_recognition.threshold:
            logger.debug(
                f"Recognized face distance {score} is less than threshold {self.config.face_recognition.threshold}"
            )
            self.__update_metrics(datetime.datetime.now().timestamp() - start)
            return

        if id in self.detected_faces and face_score <= self.detected_faces[id]:
            logger.debug(
                f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
            )
            self.__update_metrics(datetime.datetime.now().timestamp() - start)
            return

        resp = requests.post(
            f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
            json={
                "camera": obj_data.get("camera"),
                "subLabel": sub_label,
                "subLabelScore": score,
            },
        )

        if resp.status_code == 200:
            self.detected_faces[id] = face_score

        self.__update_metrics(datetime.datetime.now().timestamp() - start)

    def handle_request(self, topic, request_data) -> dict[str, any] | None:
        if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
            self.__clear_classifier()
        elif topic == EmbeddingsRequestEnum.register_face.value:
            rand_id = "".join(
                random.choices(string.ascii_lowercase + string.digits, k=6)
            )
            label = request_data["face_name"]
            id = f"{label}-{rand_id}"

            if request_data.get("cropped"):
                thumbnail = request_data["image"]
            else:
                img = cv2.imdecode(
                    np.frombuffer(
                        base64.b64decode(request_data["image"]), dtype=np.uint8
                    ),
                    cv2.IMREAD_COLOR,
                )
                face_box = self.__detect_face(img)

                if not face_box:
                    return {
                        "message": "No face was detected.",
                        "success": False,
                    }

                face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
                _, thumbnail = cv2.imencode(
                    ".webp", face, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
                )

            # write face to library
            folder = os.path.join(FACE_DIR, label)
            file = os.path.join(folder, f"{id}.webp")
            os.makedirs(folder, exist_ok=True)

            # save face image
            with open(file, "wb") as output:
                output.write(thumbnail.tobytes())

            self.__clear_classifier()
            return {
                "message": "Successfully registered face.",
                "success": True,
            }

    def expire_object(self, object_id: str):
        if object_id in self.detected_faces:
            self.detected_faces.pop(object_id)

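Note the direction of the LBPH confidence in `__classify_face`: `predict()` returns a distance where lower means more similar, and the processor maps it onto a 0-1 score. A hedged numeric sketch of that mapping and how it lines up with the `threshold=(1 - min_score) * 1000` passed to the recognizer (all numbers illustrative):

# Illustrative numbers only; shows the distance -> score mapping above.
min_score = 0.8                      # hypothetical config value
threshold = (1 - min_score) * 1000   # 200.0 -- recognizer rejects larger distances
distance = 150.0                     # hypothetical predict() result
score = 1.0 - (distance / 1000)      # 0.85
assert distance < threshold and score > min_score
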
24
frigate/data_processing/types.py
Normal file
@@ -0,0 +1,24 @@
"""Embeddings types."""

import multiprocessing as mp
from enum import Enum
from multiprocessing.sharedctypes import Synchronized


class DataProcessorMetrics:
    image_embeddings_fps: Synchronized
    text_embeddings_sps: Synchronized
    face_rec_fps: Synchronized
    alpr_pps: Synchronized

    def __init__(self):
        self.image_embeddings_fps = mp.Value("d", 0.01)
        self.text_embeddings_sps = mp.Value("d", 0.01)
        self.face_rec_fps = mp.Value("d", 0.01)
        self.alpr_pps = mp.Value("d", 0.01)


class PostProcessDataEnum(str, Enum):
    recording = "recording"
    review = "review"
    tracked_object = "tracked_object"

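Throughout this PR these shared metrics are updated with the same smoothing recipe, `new = (old * 9 + sample) / 10`, i.e. an exponential moving average with alpha = 0.1. A standalone sketch of how that converges (not Frigate code):

# Standalone sketch of the (value * 9 + sample) / 10 update used below:
# an exponential moving average with alpha = 0.1.
def ema_update(value: float, sample: float) -> float:
    return (value * 9 + sample) / 10

v = 0.01  # the metrics are initialized to 0.01
for sample in (0.2, 0.2, 0.2):
    v = ema_update(v, sample)
print(round(v, 5))  # 0.06149 -- drifting toward the steady 0.2 sample
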
@@ -194,6 +194,9 @@ class BaseDetectorConfig(BaseModel):
    model: Optional[ModelConfig] = Field(
        default=None, title="Detector specific model configuration."
    )
    model_path: Optional[str] = Field(
        default=None, title="Detector specific model path."
    )
    model_config = ConfigDict(
        extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
    )

@@ -108,7 +108,7 @@ class Rknn(DetectionApi):
        model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
            model_props["filename"] = model_path + f"-{soc}-v2.3.0-1.rknn"

            model_props["path"] = model_cache_dir + model_props["filename"]

@@ -129,7 +129,7 @@ class Rknn(DetectionApi):
            os.mkdir(model_cache_dir)

        urllib.request.urlretrieve(
            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
            f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.0/{filename}",
            model_cache_dir + filename,
        )

@@ -219,19 +219,19 @@ class TensorRtDetector(DetectionApi):
    ]

    def __init__(self, detector_config: TensorRTDetectorConfig):
        assert (
            TRT_SUPPORT
        ), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
        assert TRT_SUPPORT, (
            f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
        )

        (cuda_err,) = cuda.cuInit(0)
        assert (
            cuda_err == cuda.CUresult.CUDA_SUCCESS
        ), f"Failed to initialize cuda {cuda_err}"
        assert cuda_err == cuda.CUresult.CUDA_SUCCESS, (
            f"Failed to initialize cuda {cuda_err}"
        )
        err, dev_count = cuda.cuDeviceGetCount()
        logger.debug(f"Num Available Devices: {dev_count}")
        assert (
            detector_config.device < dev_count
        ), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
        assert detector_config.device < dev_count, (
            f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
        )
        err, self.cu_ctx = cuda.cuCtxCreate(
            cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
        )

@@ -15,6 +15,7 @@ from setproctitle import setproctitle
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
from frigate.config import FrigateConfig
from frigate.const import CONFIG_DIR, FACE_DIR
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.util.builtin import serialize
@@ -26,7 +27,7 @@ from .util import ZScoreNormalization
logger = logging.getLogger(__name__)


def manage_embeddings(config: FrigateConfig) -> None:
def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
    # Only initialize embeddings if semantic search is enabled
    if not config.semantic_search.enabled:
        return
@@ -60,6 +61,7 @@ def manage_embeddings(config: FrigateConfig) -> None:
    maintainer = EmbeddingMaintainer(
        db,
        config,
        metrics,
        stop_event,
    )
    maintainer.start()
@@ -190,8 +192,8 @@ class EmbeddingsContext:

        return results

    def register_face(self, face_name: str, image_data: bytes) -> None:
        self.requestor.send_data(
    def register_face(self, face_name: str, image_data: bytes) -> dict[str, any]:
        return self.requestor.send_data(
            EmbeddingsRequestEnum.register_face.value,
            {
                "face_name": face_name,
@@ -209,6 +211,11 @@ class EmbeddingsContext:

        return self.db.execute_sql(sql_query).fetchall()

    def clear_face_classifier(self) -> None:
        self.requestor.send_data(
            EmbeddingsRequestEnum.clear_face_classifier.value, None
        )

    def delete_face_ids(self, face: str, ids: list[str]) -> None:
        folder = os.path.join(FACE_DIR, face)
        for id in ids:

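With `register_face` now returning the responder payload instead of `None`, callers can surface success or failure directly. A hedged usage sketch (the `context` and `image_bytes` names are illustrative; the response shape assumes the `FaceProcessor.handle_request` dict above):

# Hypothetical caller; `context` is an EmbeddingsContext and `image_bytes`
# a base64-encoded image matching the request payload above.
result = context.register_face("jane", image_bytes)
if isinstance(result, dict) and result.get("success"):
    print(result["message"])  # e.g. "Successfully registered face."
else:
    print("registration was not handled or failed")
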
@@ -1,6 +1,7 @@
"""SQLite-vec embeddings database."""

import base64
import datetime
import logging
import os
import time
@@ -15,6 +16,7 @@ from frigate.const import (
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
    UPDATE_MODEL_STATE,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum

@@ -59,9 +61,15 @@ def get_metadata(event: Event) -> dict:
class Embeddings:
    """SQLite-vec embeddings database."""

    def __init__(self, config: FrigateConfig, db: SqliteVecQueueDatabase) -> None:
    def __init__(
        self,
        config: FrigateConfig,
        db: SqliteVecQueueDatabase,
        metrics: DataProcessorMetrics,
    ) -> None:
        self.config = config
        self.db = db
        self.metrics = metrics
        self.requestor = InterProcessRequestor()

        # Create tables if they don't exist

@@ -123,19 +131,6 @@ class Embeddings:
            device="GPU" if config.semantic_search.model_size == "large" else "CPU",
        )

        if self.config.face_recognition.enabled:
            self.face_embedding = GenericONNXEmbedding(
                model_name="facedet",
                model_file="facedet.onnx",
                download_urls={
                    "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
                    "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
                },
                model_size="small",
                model_type=ModelTypeEnum.face,
                requestor=self.requestor,
            )

        self.lpr_detection_model = None
        self.lpr_classification_model = None
        self.lpr_recognition_model = None

@@ -186,6 +181,7 @@ class Embeddings:
        @param: thumbnail bytes in jpg format
        @param: upsert If embedding should be upserted into vec DB
        """
        start = datetime.datetime.now().timestamp()
        # Convert thumbnail bytes to PIL Image
        embedding = self.vision_embedding([thumbnail])[0]

@@ -198,6 +194,11 @@ class Embeddings:
            (event_id, serialize(embedding)),
        )

        duration = datetime.datetime.now().timestamp() - start
        self.metrics.image_embeddings_fps.value = (
            self.metrics.image_embeddings_fps.value * 9 + duration
        ) / 10

        return embedding

    def batch_embed_thumbnail(
@@ -208,6 +209,7 @@ class Embeddings:
        @param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
        @param: upsert If embedding should be upserted into vec DB
        """
        start = datetime.datetime.now().timestamp()
        ids = list(event_thumbs.keys())
        embeddings = self.vision_embedding(list(event_thumbs.values()))

@@ -226,11 +228,17 @@ class Embeddings:
            items,
        )

        duration = datetime.datetime.now().timestamp() - start
        self.metrics.text_embeddings_sps.value = (
            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
        ) / 10

        return embeddings

    def embed_description(
        self, event_id: str, description: str, upsert: bool = True
    ) -> ndarray:
        start = datetime.datetime.now().timestamp()
        embedding = self.text_embedding([description])[0]

        if upsert:
@@ -242,11 +250,17 @@ class Embeddings:
            (event_id, serialize(embedding)),
        )

        duration = datetime.datetime.now().timestamp() - start
        self.metrics.text_embeddings_sps.value = (
            self.metrics.text_embeddings_sps.value * 9 + duration
        ) / 10

        return embedding

    def batch_embed_description(
        self, event_descriptions: dict[str, str], upsert: bool = True
    ) -> ndarray:
        start = datetime.datetime.now().timestamp()
        # upsert embeddings one by one to avoid token limit
        embeddings = []

@@ -269,6 +283,11 @@ class Embeddings:
            items,
        )

        duration = datetime.datetime.now().timestamp() - start
        self.metrics.text_embeddings_sps.value = (
            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
        ) / 10

        return embeddings

    def reindex(self) -> None:

@@ -8,7 +8,7 @@ from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from shapely.geometry import Polygon

from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.semantic_search import LicensePlateRecognitionConfig
from frigate.config.classification import LicensePlateRecognitionConfig
from frigate.embeddings.embeddings import Embeddings

logger = logging.getLogger(__name__)

@@ -1,11 +1,10 @@
"""Maintain embeddings in SQLite-vec."""

import base64
import datetime
import logging
import os
import random
import re
import string
import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
@@ -27,10 +26,13 @@ from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import (
    CLIPS_DIR,
    FACE_DIR,
    FRIGATE_LOCALHOST,
    UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.real_time.api import RealTimeProcessorApi
from frigate.data_processing.real_time.bird_processor import BirdProcessor
from frigate.data_processing.real_time.face_processor import FaceProcessor
from frigate.data_processing.types import DataProcessorMetrics
from frigate.embeddings.lpr.lpr import LicensePlateRecognition
from frigate.events.types import EventTypeEnum
from frigate.genai import get_genai_client
@@ -38,7 +40,6 @@ from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, area, calculate_region
from frigate.util.model import FaceClassificationModel

from .embeddings import Embeddings

@@ -54,11 +55,13 @@ class EmbeddingMaintainer(threading.Thread):
        self,
        db: SqliteQueueDatabase,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
        stop_event: MpEvent,
    ) -> None:
        super().__init__(name="embeddings_maintainer")
        self.config = config
        self.embeddings = Embeddings(config, db)
        self.metrics = metrics
        self.embeddings = Embeddings(config, db, metrics)

        # Check if we need to re-index events
        if config.semantic_search.reindex:
@@ -71,16 +74,13 @@ class EmbeddingMaintainer(threading.Thread):
        )
        self.embeddings_responder = EmbeddingsResponder()
        self.frame_manager = SharedMemoryFrameManager()
        self.processors: list[RealTimeProcessorApi] = []

        # set face recognition conditions
        self.face_recognition_enabled = self.config.face_recognition.enabled
        self.requires_face_detection = "face" not in self.config.objects.all_objects
        self.detected_faces: dict[str, float] = {}
        self.face_classifier = (
            FaceClassificationModel(self.config.face_recognition, db)
            if self.face_recognition_enabled
            else None
        )
        if self.config.face_recognition.enabled:
            self.processors.append(FaceProcessor(self.config, metrics))

        if self.config.classification.bird.enabled:
            self.processors.append(BirdProcessor(self.config, metrics))

        # create communication for updating event descriptions
        self.requestor = InterProcessRequestor()

@@ -100,19 +100,6 @@ class EmbeddingMaintainer(threading.Thread):
            self.lpr_config, self.requestor, self.embeddings
        )

    @property
    def face_detector(self) -> cv2.FaceDetectorYN:
        # Lazily create the classifier.
        if "face_detector" not in self.__dict__:
            self.__dict__["face_detector"] = cv2.FaceDetectorYN.create(
                "/config/model_cache/facedet/facedet.onnx",
                config="",
                input_size=(320, 320),
                score_threshold=0.8,
                nms_threshold=0.3,
            )
        return self.__dict__["face_detector"]

    def run(self) -> None:
        """Maintain a SQLite-vec database for semantic search."""
        while not self.stop_event.is_set():

@@ -148,48 +135,15 @@ class EmbeddingMaintainer(threading.Thread):
                )
            elif topic == EmbeddingsRequestEnum.generate_search.value:
                return serialize(
                    self.embeddings.text_embedding([data])[0], pack=False
                    self.embeddings.embed_description("", data, upsert=False),
                    pack=False,
                )
            elif topic == EmbeddingsRequestEnum.register_face.value:
                if not self.face_recognition_enabled:
                    return False
            else:
                for processor in self.processors:
                    resp = processor.handle_request(topic, data)

                rand_id = "".join(
                    random.choices(string.ascii_lowercase + string.digits, k=6)
                )
                label = data["face_name"]
                id = f"{label}-{rand_id}"

                if data.get("cropped"):
                    pass
                else:
                    img = cv2.imdecode(
                        np.frombuffer(
                            base64.b64decode(data["image"]), dtype=np.uint8
                        ),
                        cv2.IMREAD_COLOR,
                    )
                    face_box = self._detect_face(img)

                    if not face_box:
                        return False

                    face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
                    ret, thumbnail = cv2.imencode(
                        ".webp", face, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
                    )

                    # write face to library
                    folder = os.path.join(FACE_DIR, label)
                    file = os.path.join(folder, f"{id}.webp")
                    os.makedirs(folder, exist_ok=True)

                    # save face image
                    with open(file, "wb") as output:
                        output.write(thumbnail.tobytes())

                    self.face_classifier.clear_classifier()
                    return True
                    if resp is not None:
                        return resp
        except Exception as e:
            logger.error(f"Unable to handle embeddings request {e}")

@@ -212,8 +166,8 @@ class EmbeddingMaintainer(threading.Thread):
        # no need to process updated objects if face recognition, lpr, genai are disabled
        if (
            not camera_config.genai.enabled
            and not self.face_recognition_enabled
            and not self.lpr_config.enabled
            and len(self.processors) == 0
        ):
            return

@@ -231,11 +185,18 @@ class EmbeddingMaintainer(threading.Thread):
            )
            return

        if self.face_recognition_enabled:
            self._process_face(data, yuv_frame)
        for processor in self.processors:
            processor.process_frame(data, yuv_frame)

        if self.lpr_config.enabled:
            self._process_license_plate(data, yuv_frame)
            start = datetime.datetime.now().timestamp()
            processed = self._process_license_plate(data, yuv_frame)

            if processed:
                duration = datetime.datetime.now().timestamp() - start
                self.metrics.alpr_pps.value = (
                    self.metrics.alpr_pps.value * 9 + duration
                ) / 10

        # no need to save our own thumbnails if genai is not enabled
        # or if the object has become stationary

@@ -265,8 +226,8 @@ class EmbeddingMaintainer(threading.Thread):
        event_id, camera, updated_db = ended
        camera_config = self.config.cameras[camera]

        if event_id in self.detected_faces:
            self.detected_faces.pop(event_id)
        for processor in self.processors:
            processor.expire_object(event_id)

        if event_id in self.detected_license_plates:
            self.detected_license_plates.pop(event_id)

@@ -393,159 +354,6 @@ class EmbeddingMaintainer(threading.Thread):
        if event_id:
            self.handle_regenerate_description(event_id, source)

    def _detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
        """Detect faces in input image."""
        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
        faces = self.face_detector.detect(input)

        if faces[1] is None:
            return None

        face = None

        for _, potential_face in enumerate(faces[1]):
            raw_bbox = potential_face[0:4].astype(np.uint16)
            x: int = max(raw_bbox[0], 0)
            y: int = max(raw_bbox[1], 0)
            w: int = raw_bbox[2]
            h: int = raw_bbox[3]
            bbox = (x, y, x + w, y + h)

            if face is None or area(bbox) > area(face):
                face = bbox

        return face

    def _process_face(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
        """Look for faces in image."""
        id = obj_data["id"]

        # don't run for non person objects
        if obj_data.get("label") != "person":
            logger.debug("Not a processing face for non person object.")
            return

        # don't overwrite sub label for objects that have a sub label
        # that is not a face
        if obj_data.get("sub_label") and id not in self.detected_faces:
            logger.debug(
                f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
            )
            return

        face: Optional[dict[str, any]] = None

        if self.requires_face_detection:
            logger.debug("Running manual face detection.")
            person_box = obj_data.get("box")

            if not person_box:
                return None

            rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
            left, top, right, bottom = person_box
            person = rgb[top:bottom, left:right]
            face_box = self._detect_face(person)

            if not face_box:
                logger.debug("Detected no faces for person object.")
                return

            margin = int((face_box[2] - face_box[0]) * 0.25)
            face_frame = person[
                max(0, face_box[1] - margin) : min(
                    frame.shape[0], face_box[3] + margin
                ),
                max(0, face_box[0] - margin) : min(
                    frame.shape[1], face_box[2] + margin
                ),
            ]
            face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
        else:
            # don't run for object without attributes
            if not obj_data.get("current_attributes"):
                logger.debug("No attributes to parse.")
                return

            attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
            for attr in attributes:
                if attr.get("label") != "face":
                    continue

                if face is None or attr.get("score", 0.0) > face.get("score", 0.0):
                    face = attr

            # no faces detected in this frame
            if not face:
                return

            face_box = face.get("box")

            # check that face is valid
            if not face_box or area(face_box) < self.config.face_recognition.min_area:
                logger.debug(f"Invalid face box {face}")
                return

            face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
            margin = int((face_box[2] - face_box[0]) * 0.25)

            face_frame = face_frame[
                max(0, face_box[1] - margin) : min(
                    frame.shape[0], face_box[3] + margin
                ),
                max(0, face_box[0] - margin) : min(
                    frame.shape[1], face_box[2] + margin
                ),
            ]

        res = self.face_classifier.classify_face(face_frame)

        if not res:
            return

        sub_label, score = res

        # calculate the overall face score as the probability * area of face
        # this will help to reduce false positives from small side-angle faces
        # if a large front-on face image may have scored slightly lower but
        # is more likely to be accurate due to the larger face area
        face_score = round(score * face_frame.shape[0] * face_frame.shape[1], 2)

        logger.debug(
            f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
        )

        if self.config.face_recognition.save_attempts:
            # write face to library
            folder = os.path.join(FACE_DIR, "debug")
            file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
            os.makedirs(folder, exist_ok=True)
            cv2.imwrite(file, face_frame)

        if score < self.config.face_recognition.threshold:
            logger.debug(
                f"Recognized face distance {score} is less than threshold {self.config.face_recognition.threshold}"
            )
            return

        if id in self.detected_faces and face_score <= self.detected_faces[id]:
            logger.debug(
                f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
            )
            return

        resp = requests.post(
            f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
            json={
                "camera": obj_data.get("camera"),
                "subLabel": sub_label,
                "subLabelScore": score,
            },
        )

        if resp.status_code == 200:
            self.detected_faces[id] = face_score

    def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
        """Return the dimensions of the input image as [x, y, width, height]."""
        height, width = input.shape[:2]

@@ -553,19 +361,19 @@ class EmbeddingMaintainer(threading.Thread):

    def _process_license_plate(
        self, obj_data: dict[str, any], frame: np.ndarray
    ) -> None:
    ) -> bool:
        """Look for license plates in image."""
        id = obj_data["id"]

        # don't run for non car objects
        if obj_data.get("label") != "car":
            logger.debug("Not a processing license plate for non car object.")
            return
            return False

        # don't run for stationary car objects
        if obj_data.get("stationary") == True:
            logger.debug("Not a processing license plate for a stationary car object.")
            return
            return False

        # don't overwrite sub label for objects that have a sub label
        # that is not a license plate
@@ -573,7 +381,7 @@ class EmbeddingMaintainer(threading.Thread):
            logger.debug(
                f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
            )
            return
            return False

        license_plate: Optional[dict[str, any]] = None

@@ -582,7 +390,7 @@ class EmbeddingMaintainer(threading.Thread):
            car_box = obj_data.get("box")

            if not car_box:
                return None
                return False

            rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
            left, top, right, bottom = car_box
@@ -591,7 +399,7 @@ class EmbeddingMaintainer(threading.Thread):

            if not license_plate:
                logger.debug("Detected no license plates for car object.")
                return
                return False

            license_plate_frame = car[
                license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
@@ -601,7 +409,7 @@ class EmbeddingMaintainer(threading.Thread):
            # don't run for object without attributes
            if not obj_data.get("current_attributes"):
                logger.debug("No attributes to parse.")
                return
                return False

            attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
            for attr in attributes:
@@ -615,7 +423,7 @@ class EmbeddingMaintainer(threading.Thread):

            # no license plates detected in this frame
            if not license_plate:
                return
                return False

            license_plate_box = license_plate.get("box")

@@ -625,7 +433,7 @@ class EmbeddingMaintainer(threading.Thread):
                or area(license_plate_box) < self.config.lpr.min_area
            ):
                logger.debug(f"Invalid license plate box {license_plate}")
                return
                return False

        license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
        license_plate_frame = license_plate_frame[
@@ -654,7 +462,7 @@ class EmbeddingMaintainer(threading.Thread):
        else:
            # no plates found
            logger.debug("No text detected")
            return
            return True

        top_plate, top_char_confidences, top_area = (
            license_plates[0],
@@ -700,14 +508,14 @@ class EmbeddingMaintainer(threading.Thread):
                f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} "
                f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}"
            )
            return
            return True

        # Check against minimum confidence threshold
        if avg_confidence < self.lpr_config.threshold:
            logger.debug(
                f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})"
            )
            return
            return True

        # Determine subLabel based on known plates, use regex matching
        # Default to the detected plate, use label name if there's a match
@@ -737,6 +545,8 @@ class EmbeddingMaintainer(threading.Thread):
            "area": top_area,
        }

        return True

    def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
        """Return jpg thumbnail of a region of the frame."""
        frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)

@@ -121,8 +121,8 @@ class EventCleanup(threading.Thread):

        events_to_update = []

        for batch in query.iterator():
            events_to_update.extend([event.id for event in batch])
        for event in query.iterator():
            events_to_update.append(event.id)
            if len(events_to_update) >= CHUNK_SIZE:
                logger.debug(
                    f"Updating {update_params} for {len(events_to_update)} events"
@@ -257,7 +257,7 @@ class EventCleanup(threading.Thread):
        events_to_update = []

        for event in query.iterator():
            events_to_update.append(event)
            events_to_update.append(event.id)

            if len(events_to_update) >= CHUNK_SIZE:
                logger.debug(

@@ -6,6 +6,7 @@ from enum import Enum
from typing import Any

from frigate.const import (
    FFMPEG_HVC1_ARGS,
    FFMPEG_HWACCEL_NVIDIA,
    FFMPEG_HWACCEL_VAAPI,
    FFMPEG_HWACCEL_VULKAN,
@@ -71,8 +72,8 @@ PRESETS_HW_ACCEL_DECODE = {
    "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
    "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
    FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv -bsf:v dump_extra",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv -bsf:v dump_extra",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
    "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
    "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
@@ -497,6 +498,6 @@ def parse_preset_output_record(arg: Any, force_record_hvc1: bool) -> list[str]:

    if force_record_hvc1:
        # Apple only supports HEVC if it is hvc1 (vs. hev1)
        preset += ["-tag:v", "hvc1"]
        preset += FFMPEG_HVC1_ARGS

    return preset

@@ -1,5 +1,5 @@
[mypy]
python_version = 3.9
python_version = 3.11
show_error_codes = true
follow_imports = normal
ignore_missing_imports = true

@@ -4,7 +4,7 @@ import logging
import os
import queue
import threading
from collections import Counter, defaultdict
from collections import defaultdict
from multiprocessing.synchronize import Event as MpEvent
from typing import Callable, Optional

@@ -51,8 +51,6 @@ class CameraState:
        self.camera_config = config.cameras[name]
        self.frame_manager = frame_manager
        self.best_objects: dict[str, TrackedObject] = {}
        self.object_counts = defaultdict(int)
        self.active_object_counts = defaultdict(int)
        self.tracked_objects: dict[str, TrackedObject] = {}
        self.frame_cache = {}
        self.zone_objects = defaultdict(list)

@@ -338,6 +336,7 @@ class CameraState:
                "ratio": obj.obj_data["ratio"],
                "score": obj.obj_data["score"],
                "sub_label": sub_label,
                "current_zones": obj.current_zones,
            }
        )

@@ -377,78 +376,6 @@ class CameraState:
        for c in self.callbacks["camera_activity"]:
            c(self.name, camera_activity)

        # update overall camera state for each object type
        obj_counter = Counter(
            obj.obj_data["label"]
            for obj in tracked_objects.values()
            if not obj.false_positive
        )

        active_obj_counter = Counter(
            obj.obj_data["label"]
            for obj in tracked_objects.values()
            if not obj.false_positive and obj.active
        )

        # keep track of all labels detected for this camera
        total_label_count = 0
        total_active_label_count = 0

        # report on all detected objects
        for obj_name, count in obj_counter.items():
            total_label_count += count

            if count != self.object_counts[obj_name]:
                self.object_counts[obj_name] = count
                for c in self.callbacks["object_status"]:
                    c(self.name, obj_name, count)

        # update the active count on all detected objects
        # To ensure we emit 0's if all objects are stationary, we need to loop
        # over the set of all objects, not just active ones.
        for obj_name in set(obj_counter):
            count = active_obj_counter[obj_name]
            total_active_label_count += count

            if count != self.active_object_counts[obj_name]:
                self.active_object_counts[obj_name] = count
                for c in self.callbacks["active_object_status"]:
                    c(self.name, obj_name, count)

        # publish for all labels detected for this camera
        if total_label_count != self.object_counts.get("all"):
            self.object_counts["all"] = total_label_count
            for c in self.callbacks["object_status"]:
                c(self.name, "all", total_label_count)

        # publish active label counts for this camera
        if total_active_label_count != self.active_object_counts.get("all"):
            self.active_object_counts["all"] = total_active_label_count
            for c in self.callbacks["active_object_status"]:
                c(self.name, "all", total_active_label_count)

        # expire any objects that are >0 and no longer detected
        expired_objects = [
            obj_name
            for obj_name, count in self.object_counts.items()
            if count > 0 and obj_name not in obj_counter
        ]
        for obj_name in expired_objects:
            # Ignore the artificial all label
            if obj_name == "all":
                continue

            self.object_counts[obj_name] = 0
            for c in self.callbacks["object_status"]:
                c(self.name, obj_name, 0)
            # Only publish if the object was previously active.
            if self.active_object_counts[obj_name] > 0:
                for c in self.callbacks["active_object_status"]:
                    c(self.name, obj_name, 0)
                self.active_object_counts[obj_name] = 0
            for c in self.callbacks["snapshot"]:
                c(self.name, self.best_objects[obj_name], frame_name)

        # cleanup thumbnail frame cache
        current_thumb_frames = {
            obj.thumbnail_data["frame_time"]

@@ -635,14 +562,6 @@ class TrackedObjectProcessor(threading.Thread):
                retain=True,
            )

        def object_status(camera, object_name, status):
            self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)

        def active_object_status(camera, object_name, status):
            self.dispatcher.publish(
                f"{camera}/{object_name}/active", status, retain=False
            )

        def camera_activity(camera, activity):
            last_activity = self.camera_activity.get(camera)

@@ -659,8 +578,6 @@ class TrackedObjectProcessor(threading.Thread):
            camera_state.on("update", update)
            camera_state.on("end", end)
            camera_state.on("snapshot", snapshot)
            camera_state.on("object_status", object_status)
            camera_state.on("active_object_status", active_object_status)
            camera_state.on("camera_activity", camera_activity)
            self.camera_states[camera] = camera_state

@@ -817,124 +734,6 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
)
|
||||
)
|
||||
|
||||
# update zone counts for each label
|
||||
# for each zone in the current camera
|
||||
for zone in self.config.cameras[camera].zones.keys():
|
||||
# count labels for the camera in the zone
|
||||
obj_counter = Counter(
|
||||
obj.obj_data["label"]
|
||||
for obj in camera_state.tracked_objects.values()
|
||||
if zone in obj.current_zones and not obj.false_positive
|
||||
)
|
||||
active_obj_counter = Counter(
|
||||
obj.obj_data["label"]
|
||||
for obj in camera_state.tracked_objects.values()
|
||||
if (
|
||||
zone in obj.current_zones
|
||||
and not obj.false_positive
|
||||
and obj.active
|
||||
)
|
||||
)
|
||||
total_label_count = 0
|
||||
total_active_label_count = 0
|
||||
|
||||
# update counts and publish status
|
||||
for label in set(self.zone_data[zone].keys()) | set(obj_counter.keys()):
|
||||
# Ignore the artificial all label
|
||||
if label == "all":
|
||||
continue
|
||||
|
||||
# if we have previously published a count for this zone/label
|
||||
zone_label = self.zone_data[zone][label]
|
||||
active_zone_label = self.active_zone_data[zone][label]
|
||||
if camera in zone_label:
|
||||
current_count = sum(zone_label.values())
|
||||
current_active_count = sum(active_zone_label.values())
|
||||
zone_label[camera] = (
|
||||
obj_counter[label] if label in obj_counter else 0
|
||||
)
|
||||
active_zone_label[camera] = (
|
||||
active_obj_counter[label]
|
||||
if label in active_obj_counter
|
||||
else 0
|
||||
)
|
||||
new_count = sum(zone_label.values())
|
||||
new_active_count = sum(active_zone_label.values())
|
||||
if new_count != current_count:
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/{label}",
|
||||
new_count,
|
||||
retain=False,
|
||||
)
|
||||
if new_active_count != current_active_count:
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/{label}/active",
|
||||
new_active_count,
|
||||
retain=False,
|
||||
)
|
||||
|
||||
# Set the count for the /zone/all topic.
|
||||
total_label_count += new_count
|
||||
total_active_label_count += new_active_count
|
||||
|
||||
# if this is a new zone/label combo for this camera
|
||||
else:
|
||||
if label in obj_counter:
|
||||
zone_label[camera] = obj_counter[label]
|
||||
active_zone_label[camera] = active_obj_counter[label]
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/{label}",
|
||||
obj_counter[label],
|
||||
retain=False,
|
||||
)
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/{label}/active",
|
||||
active_obj_counter[label],
|
||||
retain=False,
|
||||
)
|
||||
|
||||
# Set the count for the /zone/all topic.
|
||||
total_label_count += obj_counter[label]
|
||||
total_active_label_count += active_obj_counter[label]
|
||||
|
||||
# if we have previously published a count for this zone all labels
|
||||
zone_label = self.zone_data[zone]["all"]
|
||||
active_zone_label = self.active_zone_data[zone]["all"]
|
||||
if camera in zone_label:
|
||||
current_count = sum(zone_label.values())
|
||||
current_active_count = sum(active_zone_label.values())
|
||||
zone_label[camera] = total_label_count
|
||||
active_zone_label[camera] = total_active_label_count
|
||||
new_count = sum(zone_label.values())
|
||||
new_active_count = sum(active_zone_label.values())
|
||||
|
||||
if new_count != current_count:
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/all",
|
||||
new_count,
|
||||
retain=False,
|
||||
)
|
||||
if new_active_count != current_active_count:
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/all/active",
|
||||
new_active_count,
|
||||
retain=False,
|
||||
)
|
||||
# if this is a new zone all label for this camera
|
||||
else:
|
||||
zone_label[camera] = total_label_count
|
||||
active_zone_label[camera] = total_active_label_count
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/all",
|
||||
total_label_count,
|
||||
retain=False,
|
||||
)
|
||||
self.dispatcher.publish(
|
||||
f"{zone}/all/active",
|
||||
total_active_label_count,
|
||||
retain=False,
|
||||
)
|
||||
|
||||
# cleanup event finished queue
|
||||
while not self.stop_event.is_set():
|
||||
update = self.event_end_subscriber.check_for_update(timeout=0.01)
|
||||
|
||||
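The zone-count bookkeeping above follows a publish-on-change pattern: per-camera counts live under zone_data[zone][label][camera], get summed across cameras, and a message goes out only when the cross-camera total changes. A minimal standalone sketch of that pattern (the publish callback and flat data layout here are illustrative stand-ins, not Frigate's actual dispatcher API):

from collections import defaultdict

# zone -> label -> camera -> count, mirroring the zone_data layout above
zone_data = defaultdict(lambda: defaultdict(dict))

def update_zone_count(zone, label, camera, count, publish):
    per_camera = zone_data[zone][label]
    previous_total = sum(per_camera.values())
    per_camera[camera] = count
    new_total = sum(per_camera.values())
    # only publish when the summed count actually changed
    if new_total != previous_total:
        publish(f"{zone}/{label}", new_total)

update_zone_count("yard", "person", "front_cam", 2, print)  # publishes: yard/person 2
update_zone_count("yard", "person", "side_cam", 0, print)   # total unchanged, no publish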
@@ -68,11 +68,13 @@ class PlusApi:
or self._token_data["expires"] - datetime.datetime.now().timestamp() < 60
):
if self.key is None:
raise Exception("Plus API not activated")
raise Exception(
"Plus API key not set. See https://docs.frigate.video/integrations/plus#set-your-api-key"
)
parts = self.key.split(":")
r = requests.get(f"{self.host}/v1/auth/token", auth=(parts[0], parts[1]))
if not r.ok:
raise Exception("Unable to refresh API token")
raise Exception(f"Unable to refresh API token: {r.text}")
self._token_data = r.json()

def _get_authorization_header(self) -> dict:
@@ -116,15 +118,6 @@ class PlusApi:
logger.error(f"Failed to upload original: {r.status_code} {r.text}")
raise Exception(r.text)

# resize and submit annotate
files = {"file": get_jpg_bytes(image, 640, 70)}
data = presigned_urls["annotate"]["fields"]
data["content-type"] = "image/jpeg"
r = requests.post(presigned_urls["annotate"]["url"], files=files, data=data)
if not r.ok:
logger.error(f"Failed to upload annotate: {r.status_code} {r.text}")
raise Exception(r.text)

# resize and submit thumbnail
files = {"file": get_jpg_bytes(image, 200, 70)}
data = presigned_urls["thumbnail"]["fields"]

@@ -135,7 +135,7 @@ class PtzMotionEstimator:

try:
logger.debug(
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0,0]])}"
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0, 0]])}"
)
except Exception:
pass
@@ -471,7 +471,7 @@ class PtzAutoTracker:
self.onvif.get_camera_status(camera)

logger.info(
f"Calibration for {camera} in progress: {round((step/num_steps)*100)}% complete"
f"Calibration for {camera} in progress: {round((step / num_steps) * 100)}% complete"
)

self.calibrating[camera] = False
@@ -690,7 +690,7 @@ class PtzAutoTracker:
f"{camera}: Predicted movement time: {self._predict_movement_time(camera, pan, tilt)}"
)
logger.debug(
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value-self.ptz_metrics[camera].start_time.value}"
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value - self.ptz_metrics[camera].start_time.value}"
)

# save metrics for better estimate calculations
@@ -983,10 +983,10 @@ class PtzAutoTracker:
logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}")
logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}")
logger.debug(
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
f"{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
)
logger.debug(
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
f"{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
)

# Zoom in conditions (and)
@@ -1069,7 +1069,7 @@ class PtzAutoTracker:
pan = ((centroid_x / camera_width) - 0.5) * 2
tilt = (0.5 - (centroid_y / camera_height)) * 2

logger.debug(f'{camera}: Original box: {obj.obj_data["box"]}')
logger.debug(f"{camera}: Original box: {obj.obj_data['box']}")
logger.debug(f"{camera}: Predicted box: {tuple(predicted_box)}")
logger.debug(
f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}"
@@ -1179,7 +1179,7 @@ class PtzAutoTracker:
)
zoom = (ratio - 1) / (ratio + 1)
logger.debug(
f'{camera}: limit: {self.tracked_object_metrics[camera]["max_target_box"]}, ratio: {ratio} zoom calculation: {zoom}'
f"{camera}: limit: {self.tracked_object_metrics[camera]['max_target_box']}, ratio: {ratio} zoom calculation: {zoom}"
)
if not result:
# zoom out with special condition if zooming out because of velocity, edges, etc.

@@ -1,15 +1,14 @@
"""Configure and control camera via onvif."""

import asyncio
import logging
from enum import Enum
from importlib.util import find_spec
from pathlib import Path

import numpy
import requests
from onvif import ONVIFCamera, ONVIFError
from onvif import ONVIFCamera, ONVIFError, ONVIFService
from zeep.exceptions import Fault, TransportError
from zeep.transports import Transport

from frigate.camera import PTZMetrics
from frigate.config import FrigateConfig, ZoomingModeEnum
@@ -49,11 +48,6 @@ class OnvifController:

if cam.onvif.host:
try:
session = requests.Session()
session.verify = not cam.onvif.tls_insecure
transport = Transport(
timeout=10, operation_timeout=10, session=session
)
self.cams[cam_name] = {
"onvif": ONVIFCamera(
cam.onvif.host,
@@ -62,9 +56,9 @@ class OnvifController:
cam.onvif.password,
wsdl_dir=str(
Path(find_spec("onvif").origin).parent / "wsdl"
).replace("dist-packages/onvif", "site-packages"),
),
adjust_time=cam.onvif.ignore_time_mismatch,
transport=transport,
encrypt=not cam.onvif.tls_insecure,
),
"init": False,
"active": False,
@@ -74,11 +68,12 @@ class OnvifController:
except ONVIFError as e:
logger.error(f"Onvif connection to {cam.name} failed: {e}")

def _init_onvif(self, camera_name: str) -> bool:
async def _init_onvif(self, camera_name: str) -> bool:
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
await onvif.update_xaddrs()

# create init services
media = onvif.create_media_service()
media: ONVIFService = await onvif.create_media_service()
logger.debug(f"Onvif media xaddr for {camera_name}: {media.xaddr}")

try:
@@ -92,7 +87,7 @@ class OnvifController:
return False

try:
profiles = media.GetProfiles()
profiles = await media.GetProfiles()
logger.debug(f"Onvif profiles for {camera_name}: {profiles}")
except (ONVIFError, Fault, TransportError) as e:
logger.error(
@@ -101,7 +96,7 @@ class OnvifController:
return False

profile = None
for key, onvif_profile in enumerate(profiles):
for _, onvif_profile in enumerate(profiles):
if (
onvif_profile.VideoEncoderConfiguration
and onvif_profile.PTZConfiguration
@@ -135,7 +130,8 @@ class OnvifController:
)
return False

ptz = onvif.create_ptz_service()
ptz: ONVIFService = await onvif.create_ptz_service()
self.cams[camera_name]["ptz"] = ptz

# setup continuous moving request
move_request = ptz.create_type("ContinuousMove")
@@ -246,7 +242,7 @@ class OnvifController:

# setup existing presets
try:
presets: list[dict] = ptz.GetPresets({"ProfileToken": profile.token})
presets: list[dict] = await ptz.GetPresets({"ProfileToken": profile.token})
except ONVIFError as e:
logger.warning(f"Unable to get presets from camera: {camera_name}: {e}")
presets = []
@@ -325,19 +321,19 @@ class OnvifController:
)

self.cams[camera_name]["features"] = supported_features

self.cams[camera_name]["init"] = True
return True

def _stop(self, camera_name: str) -> None:
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["move_request"]
onvif.get_service("ptz").Stop(
{
"ProfileToken": move_request.ProfileToken,
"PanTilt": True,
"Zoom": True,
}
asyncio.run(
self.cams[camera_name]["ptz"].Stop(
{
"ProfileToken": move_request.ProfileToken,
"PanTilt": True,
"Zoom": True,
}
)
)
self.cams[camera_name]["active"] = False

@@ -353,7 +349,6 @@ class OnvifController:
return

self.cams[camera_name]["active"] = True
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["move_request"]

if command == OnvifCommandEnum.move_left:
@@ -376,7 +371,7 @@ class OnvifController:
}

try:
onvif.get_service("ptz").ContinuousMove(move_request)
asyncio.run(self.cams[camera_name]["ptz"].ContinuousMove(move_request))
except ONVIFError as e:
logger.warning(f"Onvif sending move request to {camera_name} failed: {e}")

@@ -404,7 +399,6 @@ class OnvifController:
camera_name
].frame_time.value
self.ptz_metrics[camera_name].stop_time.value = 0
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["relative_move_request"]

# function takes in -1 to 1 for pan and tilt, interpolate to the values of the camera.
@@ -450,7 +444,7 @@ class OnvifController:
}
move_request.Translation.Zoom.x = zoom

onvif.get_service("ptz").RelativeMove(move_request)
asyncio.run(self.cams[camera_name]["ptz"].RelativeMove(move_request))

# reset after the move request
move_request.Translation.PanTilt.x = 0
@@ -475,13 +469,14 @@ class OnvifController:
self.ptz_metrics[camera_name].start_time.value = 0
self.ptz_metrics[camera_name].stop_time.value = 0
move_request = self.cams[camera_name]["move_request"]
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
preset_token = self.cams[camera_name]["presets"][preset]
onvif.get_service("ptz").GotoPreset(
{
"ProfileToken": move_request.ProfileToken,
"PresetToken": preset_token,
}
asyncio.run(
self.cams[camera_name]["ptz"].GotoPreset(
{
"ProfileToken": move_request.ProfileToken,
"PresetToken": preset_token,
}
)
)

self.cams[camera_name]["active"] = False
@@ -498,7 +493,6 @@ class OnvifController:
return

self.cams[camera_name]["active"] = True
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["move_request"]

if command == OnvifCommandEnum.zoom_in:
@@ -506,7 +500,7 @@ class OnvifController:
elif command == OnvifCommandEnum.zoom_out:
move_request.Velocity = {"Zoom": {"x": -0.5}}

onvif.get_service("ptz").ContinuousMove(move_request)
asyncio.run(self.cams[camera_name]["ptz"].ContinuousMove(move_request))

def _zoom_absolute(self, camera_name: str, zoom, speed) -> None:
if "zoom-a" not in self.cams[camera_name]["features"]:
@@ -530,7 +524,6 @@ class OnvifController:
camera_name
].frame_time.value
self.ptz_metrics[camera_name].stop_time.value = 0
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["absolute_move_request"]

# function takes in 0 to 1 for zoom, interpolate to the values of the camera.
@@ -548,7 +541,7 @@ class OnvifController:

logger.debug(f"{camera_name}: Absolute zoom: {zoom}")

onvif.get_service("ptz").AbsoluteMove(move_request)
asyncio.run(self.cams[camera_name]["ptz"].AbsoluteMove(move_request))

self.cams[camera_name]["active"] = False

@@ -560,7 +553,7 @@ class OnvifController:
return

if not self.cams[camera_name]["init"]:
if not self._init_onvif(camera_name):
if not asyncio.run(self._init_onvif(camera_name)):
return

try:
@@ -590,7 +583,7 @@ class OnvifController:
return {}

if not self.cams[camera_name]["init"]:
self._init_onvif(camera_name)
asyncio.run(self._init_onvif(camera_name))

return {
"name": camera_name,
@@ -604,15 +597,16 @@ class OnvifController:
return {}

if not self.cams[camera_name]["init"]:
self._init_onvif(camera_name)
asyncio.run(self._init_onvif(camera_name))

onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
service_capabilities_request = self.cams[camera_name][
"service_capabilities_request"
]
try:
service_capabilities = onvif.get_service("ptz").GetServiceCapabilities(
service_capabilities_request
service_capabilities = asyncio.run(
self.cams[camera_name]["ptz"].GetServiceCapabilities(
service_capabilities_request
)
)

logger.debug(
@@ -633,12 +627,13 @@ class OnvifController:
return {}

if not self.cams[camera_name]["init"]:
self._init_onvif(camera_name)
asyncio.run(self._init_onvif(camera_name))

onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
status_request = self.cams[camera_name]["status_request"]
try:
status = onvif.get_service("ptz").GetStatus(status_request)
status = asyncio.run(
self.cams[camera_name]["ptz"].GetStatus(status_request)
)
except Exception:
pass # We're unsupported, that'll be reported in the next check.
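The hunks above migrate OnvifController to an async ONVIF client: service calls like GetProfiles, ContinuousMove, and GetStatus become coroutines, and the controller's synchronous call sites bridge into them with asyncio.run. A minimal sketch of that bridging pattern, using a stand-in coroutine rather than a real ONVIFService:

import asyncio

async def continuous_move(ptz, request):
    # with an async ONVIF client, service calls like ContinuousMove are awaitable
    return await ptz.ContinuousMove(request)

def move(ptz, request):
    # synchronous callers enter the async API via asyncio.run, which creates
    # and tears down a fresh event loop for each call
    return asyncio.run(continuous_move(ptz, request))

The per-call asyncio.run keeps the surrounding thread-based code unchanged, at the cost of a new event loop per PTZ command.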
@@ -19,6 +19,7 @@ from frigate.const import (
CACHE_DIR,
CLIPS_DIR,
EXPORT_DIR,
FFMPEG_HVC1_ARGS,
MAX_PLAYLIST_SECONDS,
PREVIEW_FRAME_TYPE,
)
@@ -219,7 +220,7 @@ class RecordingExporter(threading.Thread):

if self.playback_factor == PlaybackFactorEnum.realtime:
ffmpeg_cmd = (
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart"
).split(" ")
elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
ffmpeg_cmd = (
@@ -227,11 +228,16 @@ class RecordingExporter(threading.Thread):
self.config.ffmpeg.ffmpeg_path,
self.config.ffmpeg.hwaccel_args,
f"-an {ffmpeg_input}",
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart",
EncodeTypeEnum.timelapse,
)
).split(" ")

if self.config.ffmpeg.apple_compatibility:
ffmpeg_cmd += FFMPEG_HVC1_ARGS

ffmpeg_cmd.append(video_path)

return ffmpeg_cmd, playlist_lines

def get_preview_export_command(self, video_path: str) -> list[str]:

@@ -449,7 +449,7 @@ class RecordingMaintainer(threading.Thread):
return None
else:
logger.debug(
f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
f"Copied {file_path} in {datetime.datetime.now().timestamp() - start_frame} seconds."
)

try:

@@ -256,7 +256,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
segment.detections[object["id"]] = object["sub_label"][0]
else:
segment.detections[object["id"]] = f'{object["label"]}-verified'
segment.detections[object["id"]] = f"{object['label']}-verified"
segment.sub_labels[object["id"]] = object["sub_label"][0]

# if object is alert label
@@ -352,7 +352,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
detections[object["id"]] = object["sub_label"][0]
else:
detections[object["id"]] = f'{object["label"]}-verified'
detections[object["id"]] = f"{object['label']}-verified"
sub_labels[object["id"]] = object["sub_label"][0]

# if object is alert label
@@ -527,7 +527,9 @@ class ReviewSegmentMaintainer(threading.Thread):

if event_id in self.indefinite_events[camera]:
self.indefinite_events[camera].pop(event_id)
current_segment.last_update = manual_info["end_time"]

if len(self.indefinite_events[camera]) == 0:
current_segment.last_update = manual_info["end_time"]
else:
logger.error(
f"Event with ID {event_id} has a set duration and can not be ended manually."

@@ -72,8 +72,7 @@ class BaseServiceProcess(Service, ABC):
running = False
except TimeoutError:
self.manager.logger.warning(
f"{self.name} is still running after "
f"{timeout} seconds. Killing."
f"{self.name} is still running after {timeout} seconds. Killing."
)

if running:

@@ -26,7 +26,7 @@ class Service(ABC):
self.__dict__["name"] = name

self.__manager = manager or ServiceManager.current()
self.__lock = asyncio.Lock(loop=self.__manager._event_loop)
self.__lock = asyncio.Lock(loop=self.__manager._event_loop) # type: ignore[call-arg]
self.__manager._register(self)

@property

@@ -14,6 +14,7 @@ from requests.exceptions import RequestException
from frigate.camera import CameraMetrics
from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.data_processing.types import DataProcessorMetrics
from frigate.object_detection import ObjectDetectProcess
from frigate.types import StatsTrackingTypes
from frigate.util.services import (
@@ -51,11 +52,13 @@ def get_latest_version(config: FrigateConfig) -> str:
def stats_init(
config: FrigateConfig,
camera_metrics: dict[str, CameraMetrics],
embeddings_metrics: DataProcessorMetrics | None,
detectors: dict[str, ObjectDetectProcess],
processes: dict[str, int],
) -> StatsTrackingTypes:
stats_tracking: StatsTrackingTypes = {
"camera_metrics": camera_metrics,
"embeddings_metrics": embeddings_metrics,
"detectors": detectors,
"started": int(time.time()),
"latest_frigate_version": get_latest_version(config),
@@ -195,7 +198,7 @@ async def set_gpu_stats(
continue

# intel QSV GPU
intel_usage = get_intel_gpu_stats()
intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)

if intel_usage is not None:
stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""}
@@ -220,7 +223,7 @@ async def set_gpu_stats(
continue

# intel VAAPI GPU
intel_usage = get_intel_gpu_stats()
intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)

if intel_usage is not None:
stats["intel-vaapi"] = intel_usage or {"gpu": "", "mem": ""}
@@ -279,6 +282,27 @@ def stats_snapshot(
}
stats["detection_fps"] = round(total_detection_fps, 2)

if config.semantic_search.enabled:
embeddings_metrics = stats_tracking["embeddings_metrics"]
stats["embeddings"] = {
"image_embedding_speed": round(
embeddings_metrics.image_embeddings_fps.value * 1000, 2
),
"text_embedding_speed": round(
embeddings_metrics.text_embeddings_sps.value * 1000, 2
),
}

if config.face_recognition.enabled:
stats["embeddings"]["face_recognition_speed"] = round(
embeddings_metrics.face_rec_fps.value * 1000, 2
)

if config.lpr.enabled:
stats["embeddings"]["plate_recognition_speed"] = round(
embeddings_metrics.alpr_pps.value * 1000, 2
)

get_processing_stats(config, stats, hwaccel_errors)

stats["service"] = {
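The new embeddings block in stats_snapshot reports processing speeds in milliseconds (the shared metric values are multiplied by 1000), and the face and plate entries only appear when their features are enabled. An illustrative sketch of the resulting shape, with made-up numbers:

stats = {
    "embeddings": {
        "image_embedding_speed": 52.31,   # ms, present when semantic_search is enabled
        "text_embedding_speed": 11.08,    # ms
        "face_recognition_speed": 9.74,   # ms, only when face_recognition is enabled
        "plate_recognition_speed": 24.6,  # ms, only when lpr is enabled
    },
}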
@@ -75,11 +75,11 @@ class TestConfig(unittest.TestCase):
"detectors": {
"cpu": {
"type": "cpu",
"model": {"path": "/cpu_model.tflite"},
"model_path": "/cpu_model.tflite",
},
"edgetpu": {
"type": "edgetpu",
"model": {"path": "/edgetpu_model.tflite"},
"model_path": "/edgetpu_model.tflite",
},
"openvino": {
"type": "openvino",

@@ -38,7 +38,7 @@ class TestGpuStats(unittest.TestCase):
process.returncode = 124
process.stdout = self.intel_results
sp.return_value = process
intel_stats = get_intel_gpu_stats()
intel_stats = get_intel_gpu_stats(False)
print(f"the intel stats are {intel_stats}")
assert intel_stats == {
"gpu": "1.13%",

@@ -339,7 +339,7 @@ class TrackedObject:
box[2],
box[3],
self.obj_data["label"],
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
f"{int(self.thumbnail_data['score'] * 100)}% {int(self.thumbnail_data['area'])}",
thickness=thickness,
color=color,
)

@@ -2,11 +2,13 @@ from enum import Enum
from typing import TypedDict

from frigate.camera import CameraMetrics
from frigate.data_processing.types import DataProcessorMetrics
from frigate.object_detection import ObjectDetectProcess


class StatsTrackingTypes(TypedDict):
camera_metrics: dict[str, CameraMetrics]
embeddings_metrics: DataProcessorMetrics | None
detectors: dict[str, ObjectDetectProcess]
started: int
latest_frigate_version: str

@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = "0.15-0"
CURRENT_CONFIG_VERSION = "0.15-1"
DEFAULT_CONFIG_FILE = "/config/config.yml"


@@ -77,6 +77,13 @@ def migrate_frigate_config(config_file: str):
yaml.dump(new_config, f)
previous_version = "0.15-0"

if previous_version < "0.15-1":
logger.info(f"Migrating frigate config from {previous_version} to 0.15-1...")
new_config = migrate_015_1(config)
with open(config_file, "w") as f:
yaml.dump(new_config, f)
previous_version = "0.15-1"

logger.info("Finished frigate config migration...")


@@ -267,6 +274,21 @@ def migrate_015_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
return new_config


def migrate_015_1(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:
"""Handle migrating frigate config to 0.15-1"""
new_config = config.copy()

for detector, detector_config in config.get("detectors", {}).items():
path = detector_config.get("model", {}).get("path")

if path:
new_config["detectors"][detector]["model_path"] = path
del new_config["detectors"][detector]["model"]

new_config["version"] = "0.15-1"
return new_config
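migrate_015_1 flattens each detector's nested model.path into a top-level model_path and drops the old model key, matching the updated test fixtures above. A before/after sketch:

old = {"detectors": {"cpu": {"type": "cpu", "model": {"path": "/cpu_model.tflite"}}}}
new = migrate_015_1(old)
assert new["detectors"]["cpu"]["model_path"] == "/cpu_model.tflite"
assert "model" not in new["detectors"]["cpu"]
assert new["version"] == "0.15-1"

Note that config.copy() is shallow, so the nested detector dicts are mutated in place; that is acceptable here because the migrated dict is immediately written back to disk.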
def get_relative_coordinates(
mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
) -> Union[str, list]:
@@ -292,7 +314,7 @@ def get_relative_coordinates(
continue

rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)

relative_masks.append(",".join(rel_points))
@@ -315,7 +337,7 @@ def get_relative_coordinates(
return []

rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)

mask = ",".join(rel_points)

@@ -51,12 +51,14 @@ class ModelDownloader:
download_path: str,
file_names: List[str],
download_func: Callable[[str], None],
complete_func: Callable[[], None] | None = None,
silent: bool = False,
):
self.model_name = model_name
self.download_path = download_path
self.file_names = file_names
self.download_func = download_func
self.complete_func = complete_func
self.silent = silent
self.requestor = InterProcessRequestor()
self.download_thread = None
@@ -97,6 +99,9 @@ class ModelDownloader:
},
)

if self.complete_func:
self.complete_func()

self.requestor.stop()
self.download_complete.set()
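ModelDownloader gains an optional complete_func hook that fires once after every file in file_names has been downloaded. A hypothetical usage sketch; the model name, path, and no-op download function are placeholders, not real Frigate model assets:

def on_ready() -> None:
    print("model files are in place, safe to load the runtime")

downloader = ModelDownloader(
    model_name="example-model",
    download_path="/config/model_cache/example",
    file_names=["model.onnx"],
    download_func=lambda path: None,  # stand-in for the real fetch logic
    complete_func=on_ready,           # runs once after all files are downloaded
)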
@@ -2,14 +2,9 @@

import logging
import os
from typing import Any, Optional
from typing import Any

import cv2
import numpy as np
import onnxruntime as ort
from playhouse.sqliteq import SqliteQueueDatabase

from frigate.config.semantic_search import FaceRecognitionConfig

try:
import openvino as ov
@@ -20,9 +15,6 @@ except ImportError:
logger = logging.getLogger(__name__)


MIN_MATCHING_FACES = 2


def get_ort_providers(
force_cpu: bool = False, device: str = "AUTO", requires_fp16: bool = False
) -> tuple[list[str], list[dict[str, any]]]:
@@ -156,114 +148,3 @@ class ONNXModelRunner:
return [infer_request.get_output_tensor().data]
elif self.type == "ort":
return self.ort.run(None, input)


class FaceClassificationModel:
def __init__(self, config: FaceRecognitionConfig, db: SqliteQueueDatabase):
self.config = config
self.db = db
self.landmark_detector = cv2.face.createFacemarkLBF()
self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
self.recognizer: cv2.face.LBPHFaceRecognizer = (
cv2.face.LBPHFaceRecognizer_create(
radius=2, threshold=(1 - config.min_score) * 1000
)
)
self.label_map: dict[int, str] = {}
self.__build_classifier()

def __build_classifier(self) -> None:
labels = []
faces = []

dir = "/media/frigate/clips/faces"
for idx, name in enumerate(os.listdir(dir)):
if name == "debug":
continue

self.label_map[idx] = name
face_folder = os.path.join(dir, name)
for image in os.listdir(face_folder):
img = cv2.imread(os.path.join(face_folder, image))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
faces.append(img)
labels.append(idx)

self.recognizer.train(faces, np.array(labels))

def __align_face(
self,
image: np.ndarray,
output_width: int,
output_height: int,
) -> np.ndarray:
_, lands = self.landmark_detector.fit(
image, np.array([(0, 0, image.shape[1], image.shape[0])])
)
landmarks = lands[0][0]

# get landmarks for eyes
leftEyePts = landmarks[42:48]
rightEyePts = landmarks[36:42]

# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")

# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180

# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
desiredRightEyeX = 1.0 - 0.35

# determine the scale of the new resulting image by taking
# the ratio of the distance between eyes in the *current*
# image to the ratio of distance between eyes in the
# *desired* image
dist = np.sqrt((dX**2) + (dY**2))
desiredDist = desiredRightEyeX - 0.35
desiredDist *= output_width
scale = desiredDist / dist

# compute center (x, y)-coordinates (i.e., the median point)
# between the two eyes in the input image
# grab the rotation matrix for rotating and scaling the face
eyesCenter = (
int((leftEyeCenter[0] + rightEyeCenter[0]) // 2),
int((leftEyeCenter[1] + rightEyeCenter[1]) // 2),
)
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)

# update the translation component of the matrix
tX = output_width * 0.5
tY = output_height * 0.35
M[0, 2] += tX - eyesCenter[0]
M[1, 2] += tY - eyesCenter[1]

# apply the affine transformation
return cv2.warpAffine(
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
)

def clear_classifier(self) -> None:
self.classifier = None
self.labeler = None
self.label_map = {}

def classify_face(self, face_image: np.ndarray) -> Optional[tuple[str, float]]:
if not self.label_map:
self.__build_classifier()

img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
img = self.__align_face(img, img.shape[1], img.shape[0])
index, distance = self.recognizer.predict(img)

if index == -1:
return None

score = 1.0 - (distance / 1000)
return self.label_map[index], round(score, 2)

@@ -255,7 +255,7 @@ def get_amd_gpu_stats() -> dict[str, str]:
return results


def get_intel_gpu_stats() -> dict[str, str]:
def get_intel_gpu_stats(sriov: bool) -> dict[str, str]:
"""Get stats using intel_gpu_top."""

def get_stats_manually(output: str) -> dict[str, str]:
@@ -302,6 +302,9 @@ def get_intel_gpu_stats() -> dict[str, str]:
"1",
]

if sriov:
intel_gpu_top_command += ["-d", "drm:/dev/dri/card0"]

p = sp.run(
intel_gpu_top_command,
encoding="ascii",
@@ -390,12 +393,22 @@ def try_get_info(f, h, default="N/A"):


def get_nvidia_gpu_stats() -> dict[int, dict]:
names: dict[str, int] = {}
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
gpu_name = nvml.nvmlDeviceGetName(handle)

# handle case where user has multiple of same GPU
if gpu_name in names:
names[gpu_name] += 1
gpu_name += f" ({names.get(gpu_name)})"
else:
names[gpu_name] = 1

meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
@@ -423,7 +436,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
dec_util = -1

results[i] = {
"name": nvml.nvmlDeviceGetName(handle),
"name": gpu_name,
"gpu": gpu_util,
"mem": gpu_mem_util,
"enc": enc_util,
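The names bookkeeping above disambiguates hosts with several identical GPUs by suffixing a counter onto every duplicate model name. The same logic in isolation (the GPU names are examples):

names = {}

def dedupe(gpu_name):
    if gpu_name in names:
        # second and later occurrences get a numeric suffix
        names[gpu_name] += 1
        return f"{gpu_name} ({names[gpu_name]})"
    names[gpu_name] = 1
    return gpu_name

assert dedupe("NVIDIA GeForce RTX 3060") == "NVIDIA GeForce RTX 3060"
assert dedupe("NVIDIA GeForce RTX 3060") == "NVIDIA GeForce RTX 3060 (2)"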
@@ -208,7 +208,7 @@ class ProcessClip:
|
||||
box[2],
|
||||
box[3],
|
||||
obj["id"],
|
||||
f"{int(obj['score']*100)}% {int(obj['area'])}",
|
||||
f"{int(obj['score'] * 100)}% {int(obj['area'])}",
|
||||
thickness=thickness,
|
||||
color=color,
|
||||
)
|
||||
@@ -227,7 +227,7 @@ class ProcessClip:
|
||||
)
|
||||
|
||||
cv2.imwrite(
|
||||
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
|
||||
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time * 1000000)}.jpg",
|
||||
current_frame,
|
||||
)
|
||||
|
||||
@@ -290,7 +290,7 @@ def process(path, label, output, debug_path):
|
||||
1 for result in results if result[1]["true_positive_objects"] > 0
|
||||
)
|
||||
print(
|
||||
f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
|
||||
f"Objects were detected in {positive_count}/{len(results)}({positive_count / len(results) * 100:.2f}%) clip(s)."
|
||||
)
|
||||
|
||||
if output:
|
||||
|
||||
@@ -755,7 +755,11 @@ export function CameraGroupEdit({
|
||||
<FormMessage />
|
||||
{[
|
||||
...(birdseyeConfig?.enabled ? ["birdseye"] : []),
|
||||
...Object.keys(config?.cameras ?? {}),
|
||||
...Object.keys(config?.cameras ?? {}).sort(
|
||||
(a, b) =>
|
||||
(config?.cameras[a]?.ui?.order ?? 0) -
|
||||
(config?.cameras[b]?.ui?.order ?? 0),
|
||||
),
|
||||
].map((camera) => (
|
||||
<FormControl key={camera}>
|
||||
<FilterSwitch
|
||||
|
||||
25
web/src/components/icons/AddFaceIcon.tsx
Normal file
@@ -0,0 +1,25 @@
|
||||
import { forwardRef } from "react";
|
||||
import { LuPlus, LuScanFace } from "react-icons/lu";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
type AddFaceIconProps = {
|
||||
className?: string;
|
||||
onClick?: () => void;
|
||||
};
|
||||
|
||||
const AddFaceIcon = forwardRef<HTMLDivElement, AddFaceIconProps>(
|
||||
({ className, onClick }, ref) => {
|
||||
return (
|
||||
<div
|
||||
ref={ref}
|
||||
className={cn("relative flex items-center", className)}
|
||||
onClick={onClick}
|
||||
>
|
||||
<LuScanFace className="size-full" />
|
||||
<LuPlus className="absolute size-4 translate-x-3 translate-y-3" />
|
||||
</div>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export default AddFaceIcon;
|
||||
@@ -477,7 +477,10 @@ export default function ObjectLifecycle({
|
||||
</p>
|
||||
{Array.isArray(item.data.box) &&
|
||||
item.data.box.length >= 4
|
||||
? (item.data.box[2] / item.data.box[3]).toFixed(2)
|
||||
? (
|
||||
aspectRatio *
|
||||
(item.data.box[2] / item.data.box[3])
|
||||
).toFixed(2)
|
||||
: "N/A"}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -505,45 +505,46 @@ function ObjectDetailsTab({
|
||||
|
||||
<div className="flex w-full flex-row justify-end gap-2">
|
||||
{config?.cameras[search.camera].genai.enabled && search.end_time && (
|
||||
<>
|
||||
<div className="flex items-start">
|
||||
<Button
|
||||
className="rounded-r-none border-r-0"
|
||||
aria-label="Regenerate tracked object description"
|
||||
onClick={() => regenerateDescription("thumbnails")}
|
||||
>
|
||||
Regenerate
|
||||
</Button>
|
||||
{search.has_snapshot && (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
className="rounded-l-none border-l-0 px-2"
|
||||
aria-label="Expand regeneration menu"
|
||||
>
|
||||
<FaChevronDown className="size-3" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent>
|
||||
<DropdownMenuItem
|
||||
className="cursor-pointer"
|
||||
aria-label="Regenerate from snapshot"
|
||||
onClick={() => regenerateDescription("snapshot")}
|
||||
>
|
||||
Regenerate from Snapshot
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className="cursor-pointer"
|
||||
aria-label="Regenerate from thumbnails"
|
||||
onClick={() => regenerateDescription("thumbnails")}
|
||||
>
|
||||
Regenerate from Thumbnails
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="flex items-start">
|
||||
<Button
|
||||
className="rounded-r-none border-r-0"
|
||||
aria-label="Regenerate tracked object description"
|
||||
onClick={() => regenerateDescription("thumbnails")}
|
||||
>
|
||||
Regenerate
|
||||
</Button>
|
||||
{search.has_snapshot && (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
className="rounded-l-none border-l-0 px-2"
|
||||
aria-label="Expand regeneration menu"
|
||||
>
|
||||
<FaChevronDown className="size-3" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent>
|
||||
<DropdownMenuItem
|
||||
className="cursor-pointer"
|
||||
aria-label="Regenerate from snapshot"
|
||||
onClick={() => regenerateDescription("snapshot")}
|
||||
>
|
||||
Regenerate from Snapshot
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
className="cursor-pointer"
|
||||
aria-label="Regenerate from thumbnails"
|
||||
onClick={() => regenerateDescription("thumbnails")}
|
||||
>
|
||||
Regenerate from Thumbnails
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
{(config?.cameras[search.camera].genai.enabled && search.end_time) ||
|
||||
(!config?.cameras[search.camera].genai.enabled && (
|
||||
<Button
|
||||
variant="select"
|
||||
aria-label="Save"
|
||||
@@ -551,8 +552,7 @@ function ObjectDetailsTab({
|
||||
>
|
||||
Save
|
||||
</Button>
|
||||
</>
|
||||
)}
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -46,7 +46,7 @@ export default function SearchSettings({
|
||||
const trigger = (
|
||||
<Button
|
||||
className="flex items-center gap-2"
|
||||
aria-label="Search Settings"
|
||||
aria-label="Explore Settings"
|
||||
size="sm"
|
||||
>
|
||||
<FaCog className="text-secondary-foreground" />
|
||||
|
||||
@@ -328,12 +328,12 @@ export default function Explore() {
|
||||
<div className="flex max-w-96 flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
|
||||
<div className="my-5 flex flex-col items-center gap-2 text-xl">
|
||||
<TbExclamationCircle className="mb-3 size-10" />
|
||||
<div>Search Unavailable</div>
|
||||
<div>Explore is Unavailable</div>
|
||||
</div>
|
||||
{embeddingsReindexing && allModelsLoaded && (
|
||||
<>
|
||||
<div className="text-center text-primary-variant">
|
||||
Search can be used after tracked object embeddings have
|
||||
Explore can be used after tracked object embeddings have
|
||||
finished reindexing.
|
||||
</div>
|
||||
<div className="pt-5 text-center">
|
||||
@@ -384,8 +384,8 @@ export default function Explore() {
|
||||
<>
|
||||
<div className="text-center text-primary-variant">
|
||||
Frigate is downloading the necessary embeddings models to
|
||||
support semantic searching. This may take several minutes
|
||||
depending on the speed of your network connection.
|
||||
support the Semantic Search feature. This may take several
|
||||
minutes depending on the speed of your network connection.
|
||||
</div>
|
||||
<div className="flex w-96 flex-col gap-2 py-5">
|
||||
<div className="flex flex-row items-center justify-center gap-2">
|
||||
|
||||
@@ -1,19 +1,41 @@
|
||||
import { baseUrl } from "@/api/baseUrl";
|
||||
import Chip from "@/components/indicators/Chip";
|
||||
import AddFaceIcon from "@/components/icons/AddFaceIcon";
|
||||
import ActivityIndicator from "@/components/indicators/activity-indicator";
|
||||
import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuLabel,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/ui/dropdown-menu";
|
||||
import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
|
||||
import { Toaster } from "@/components/ui/sonner";
|
||||
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
import useOptimisticState from "@/hooks/use-optimistic-state";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { FrigateConfig } from "@/types/frigateConfig";
|
||||
import axios from "axios";
|
||||
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import { isDesktop } from "react-device-detect";
|
||||
import { LuImagePlus, LuTrash } from "react-icons/lu";
|
||||
import { LuImagePlus, LuTrash2 } from "react-icons/lu";
|
||||
import { toast } from "sonner";
|
||||
import useSWR from "swr";
|
||||
|
||||
export default function FaceLibrary() {
|
||||
const { data: config } = useSWR<FrigateConfig>("config");
|
||||
|
||||
// title
|
||||
|
||||
useEffect(() => {
|
||||
document.title = "Face Library - Frigate";
|
||||
}, []);
|
||||
|
||||
const [page, setPage] = useState<string>();
|
||||
const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
|
||||
const tabsRef = useRef<HTMLDivElement | null>(null);
|
||||
@@ -24,7 +46,7 @@ export default function FaceLibrary() {
|
||||
|
||||
const faces = useMemo<string[]>(
|
||||
() =>
|
||||
faceData ? Object.keys(faceData).filter((face) => face != "debug") : [],
|
||||
faceData ? Object.keys(faceData).filter((face) => face != "train") : [],
|
||||
[faceData],
|
||||
);
|
||||
const faceImages = useMemo<string[]>(
|
||||
@@ -32,24 +54,24 @@ export default function FaceLibrary() {
|
||||
[pageToggle, faceData],
|
||||
);
|
||||
|
||||
const faceAttempts = useMemo<string[]>(
|
||||
() => faceData?.["debug"] || [],
|
||||
const trainImages = useMemo<string[]>(
|
||||
() => faceData?.["train"] || [],
|
||||
[faceData],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (!pageToggle) {
|
||||
if (faceAttempts.length > 0) {
|
||||
setPageToggle("attempts");
|
||||
if (trainImages.length > 0) {
|
||||
setPageToggle("train");
|
||||
} else if (faces) {
|
||||
setPageToggle(faces[0]);
|
||||
}
|
||||
} else if (pageToggle == "attempts" && faceAttempts.length == 0) {
|
||||
} else if (pageToggle == "train" && trainImages.length == 0) {
|
||||
setPageToggle(faces[0]);
|
||||
}
|
||||
// we need to listen on the value of the faces list
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [faceAttempts, faces]);
|
||||
}, [trainImages, faces]);
|
||||
|
||||
// upload
|
||||
|
||||
@@ -91,6 +113,10 @@ export default function FaceLibrary() {
|
||||
[pageToggle, refreshFaces],
|
||||
);
|
||||
|
||||
if (!config) {
|
||||
return <ActivityIndicator />;
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex size-full flex-col p-2">
|
||||
<Toaster />
|
||||
@@ -103,7 +129,7 @@ export default function FaceLibrary() {
|
||||
onSave={onUploadImage}
|
||||
/>
|
||||
|
||||
<div className="relative flex h-11 w-full items-center justify-between">
|
||||
<div className="relative mb-2 flex h-11 w-full items-center justify-between">
|
||||
<ScrollArea className="w-full whitespace-nowrap">
|
||||
<div ref={tabsRef} className="flex flex-row">
|
||||
<ToggleGroup
|
||||
@@ -117,15 +143,15 @@ export default function FaceLibrary() {
|
||||
}
|
||||
}}
|
||||
>
|
||||
{faceAttempts.length > 0 && (
|
||||
{trainImages.length > 0 && (
|
||||
<>
|
||||
<ToggleGroupItem
|
||||
value="attempts"
|
||||
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == "attempts" ? "" : "*:text-muted-foreground"}`}
|
||||
data-nav-item="attempts"
|
||||
aria-label="Select attempts"
|
||||
value="train"
|
||||
className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == "train" ? "" : "*:text-muted-foreground"}`}
|
||||
data-nav-item="train"
|
||||
aria-label="Select train"
|
||||
>
|
||||
<div>Attempts</div>
|
||||
<div>Train</div>
|
||||
</ToggleGroupItem>
|
||||
<div>|</div>
|
||||
</>
|
||||
@@ -139,22 +165,32 @@ export default function FaceLibrary() {
|
||||
data-nav-item={item}
|
||||
aria-label={`Select ${item}`}
|
||||
>
|
||||
<div className="capitalize">{item}</div>
|
||||
<div className="capitalize">
|
||||
{item} ({faceData[item].length})
|
||||
</div>
|
||||
</ToggleGroupItem>
|
||||
))}
|
||||
</ToggleGroup>
|
||||
<ScrollBar orientation="horizontal" className="h-0" />
|
||||
</div>
|
||||
</ScrollArea>
|
||||
<Button className="flex gap-2" onClick={() => setUpload(true)}>
|
||||
<LuImagePlus className="size-7 rounded-md p-1 text-secondary-foreground" />
|
||||
Upload Image
|
||||
</Button>
|
||||
</div>
|
||||
{pageToggle &&
|
||||
(pageToggle == "attempts" ? (
|
||||
<AttemptsGrid attemptImages={faceAttempts} onRefresh={refreshFaces} />
|
||||
(pageToggle == "train" ? (
|
||||
<TrainingGrid
|
||||
config={config}
|
||||
attemptImages={trainImages}
|
||||
faceNames={faces}
|
||||
onRefresh={refreshFaces}
|
||||
/>
|
||||
) : (
|
||||
<FaceGrid
|
||||
faceImages={faceImages}
|
||||
pageToggle={pageToggle}
|
||||
setUpload={setUpload}
|
||||
onRefresh={refreshFaces}
|
||||
/>
|
||||
))}
|
||||
@@ -162,15 +198,28 @@ export default function FaceLibrary() {
|
||||
);
|
||||
}
|
||||
|
||||
type AttemptsGridProps = {
|
||||
type TrainingGridProps = {
|
||||
config: FrigateConfig;
|
||||
attemptImages: string[];
|
||||
faceNames: string[];
|
||||
onRefresh: () => void;
|
||||
};
|
||||
function AttemptsGrid({ attemptImages, onRefresh }: AttemptsGridProps) {
|
||||
function TrainingGrid({
|
||||
config,
|
||||
attemptImages,
|
||||
faceNames,
|
||||
onRefresh,
|
||||
}: TrainingGridProps) {
|
||||
return (
|
||||
<div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
|
||||
{attemptImages.map((image: string) => (
|
||||
<FaceAttempt key={image} image={image} onRefresh={onRefresh} />
|
||||
<FaceAttempt
|
||||
key={image}
|
||||
image={image}
|
||||
faceNames={faceNames}
|
||||
threshold={config.face_recognition.threshold}
|
||||
onRefresh={onRefresh}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
@@ -178,11 +227,16 @@ function AttemptsGrid({ attemptImages, onRefresh }: AttemptsGridProps) {
|
||||
|
||||
type FaceAttemptProps = {
|
||||
image: string;
|
||||
faceNames: string[];
|
||||
threshold: number;
|
||||
onRefresh: () => void;
|
||||
};
|
||||
function FaceAttempt({ image, onRefresh }: FaceAttemptProps) {
|
||||
const [hovered, setHovered] = useState(false);
|
||||
|
||||
function FaceAttempt({
|
||||
image,
|
||||
faceNames,
|
||||
threshold,
|
||||
onRefresh,
|
||||
}: FaceAttemptProps) {
|
||||
const data = useMemo(() => {
|
||||
const parts = image.split("-");
|
||||
|
||||
@@ -193,9 +247,36 @@ function FaceAttempt({ image, onRefresh }: FaceAttemptProps) {
|
||||
};
|
||||
}, [image]);
|
||||
|
||||
const onTrainAttempt = useCallback(
|
||||
(trainName: string) => {
|
||||
axios
|
||||
.post(`/faces/train/${trainName}/classify`, { training_file: image })
|
||||
.then((resp) => {
|
||||
if (resp.status == 200) {
|
||||
toast.success(`Successfully trained face.`, {
|
||||
position: "top-center",
|
||||
});
|
||||
onRefresh();
|
||||
}
|
||||
})
|
||||
.catch((error) => {
|
||||
if (error.response?.data?.message) {
|
||||
toast.error(`Failed to train: ${error.response.data.message}`, {
|
||||
position: "top-center",
|
||||
});
|
||||
} else {
|
||||
toast.error(`Failed to train: ${error.message}`, {
|
||||
position: "top-center",
|
||||
});
|
||||
}
|
||||
});
|
||||
},
|
||||
[image, onRefresh],
|
||||
);
|
||||
|
||||
const onDelete = useCallback(() => {
|
||||
axios
|
||||
.post(`/faces/debug/delete`, { ids: [image] })
|
||||
.post(`/faces/train/delete`, { ids: [image] })
|
||||
.then((resp) => {
|
||||
if (resp.status == 200) {
|
||||
toast.success(`Successfully deleted face.`, {
|
||||
@@ -218,28 +299,58 @@ function FaceAttempt({ image, onRefresh }: FaceAttemptProps) {
|
||||
}, [image, onRefresh]);
|
||||
|
||||
return (
|
||||
<div
|
||||
className="relative h-min"
|
||||
onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
|
||||
onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
|
||||
onClick={isDesktop ? undefined : () => setHovered(!hovered)}
|
||||
>
|
||||
{hovered && (
|
||||
<div className="absolute right-1 top-1">
|
||||
<Chip
|
||||
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
|
||||
onClick={() => onDelete()}
|
||||
>
|
||||
<LuTrash className="size-4 fill-destructive text-destructive" />
|
||||
</Chip>
|
||||
<div className="relative flex flex-col rounded-lg">
|
||||
<div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
|
||||
<img className="h-40" src={`${baseUrl}clips/faces/train/${image}`} />
|
||||
</div>
|
||||
<div className="rounded-b-lg bg-card p-2">
|
||||
<div className="flex w-full flex-row items-center justify-between gap-2">
|
||||
<div className="flex flex-col items-start text-xs text-primary-variant">
|
||||
<div className="capitalize">{data.name}</div>
|
||||
<div
|
||||
className={cn(
|
||||
Number.parseFloat(data.score) >= threshold
|
||||
? "text-success"
|
||||
: "text-danger",
|
||||
)}
|
||||
>
|
||||
{Number.parseFloat(data.score) * 100}%
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex flex-row items-start justify-end gap-5 md:gap-4">
|
||||
<Tooltip>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger>
|
||||
<TooltipTrigger>
|
||||
<AddFaceIcon className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
|
||||
</TooltipTrigger>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent>
|
||||
<DropdownMenuLabel>Train Face as:</DropdownMenuLabel>
|
||||
{faceNames.map((faceName) => (
|
||||
<DropdownMenuItem
|
||||
key={faceName}
|
||||
className="cursor-pointer capitalize"
|
||||
onClick={() => onTrainAttempt(faceName)}
|
||||
>
|
||||
{faceName}
|
||||
</DropdownMenuItem>
|
||||
))}
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
<TooltipContent>Train Face as Person</TooltipContent>
|
||||
</Tooltip>
|
||||
<Tooltip>
|
||||
<TooltipTrigger>
|
||||
<LuTrash2
|
||||
className="size-5 cursor-pointer text-primary-variant hover:text-primary"
|
||||
onClick={onDelete}
|
||||
/>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>Delete Face Attempt</TooltipContent>
|
||||
</Tooltip>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div className="rounded-md bg-secondary">
|
||||
<img
|
||||
className="h-40 rounded-md"
|
||||
src={`${baseUrl}clips/faces/debug/${image}`}
|
||||
/>
|
||||
<div className="p-2">{`${data.name}: ${data.score}`}</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
@@ -248,15 +359,9 @@ function FaceAttempt({ image, onRefresh }: FaceAttemptProps) {
|
||||
type FaceGridProps = {
|
||||
faceImages: string[];
|
||||
pageToggle: string;
|
||||
setUpload: (upload: boolean) => void;
|
||||
onRefresh: () => void;
|
||||
};
|
||||
function FaceGrid({
|
||||
faceImages,
|
||||
pageToggle,
|
||||
setUpload,
|
||||
onRefresh,
|
||||
}: FaceGridProps) {
|
||||
function FaceGrid({ faceImages, pageToggle, onRefresh }: FaceGridProps) {
|
||||
return (
|
||||
<div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
|
||||
{faceImages.map((image: string) => (
|
||||
@@ -267,9 +372,6 @@ function FaceGrid({
|
||||
onRefresh={onRefresh}
|
||||
/>
|
||||
))}
|
||||
<Button key="upload" className="size-40" onClick={() => setUpload(true)}>
|
||||
<LuImagePlus className="size-10" />
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -280,8 +382,6 @@ type FaceImageProps = {
  onRefresh: () => void;
};
function FaceImage({ name, image, onRefresh }: FaceImageProps) {
  const [hovered, setHovered] = useState(false);

  const onDelete = useCallback(() => {
    axios
      .post(`/faces/${name}/delete`, { ids: [image] })
@@ -307,26 +407,28 @@ function FaceImage({ name, image, onRefresh }: FaceImageProps) {
  }, [name, image, onRefresh]);

  return (
    <div
      className="relative h-40"
      onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
      onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
      onClick={isDesktop ? undefined : () => setHovered(!hovered)}
    >
      {hovered && (
        <div className="absolute right-1 top-1">
          <Chip
            className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
            onClick={() => onDelete()}
          >
            <LuTrash className="size-4 fill-destructive text-destructive" />
          </Chip>
    <div className="relative flex flex-col rounded-lg">
      <div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
        <img className="h-40" src={`${baseUrl}clips/faces/${name}/${image}`} />
      </div>
      <div className="rounded-b-lg bg-card p-2">
        <div className="flex w-full flex-row items-center justify-between gap-2">
          <div className="flex flex-col items-start text-xs text-primary-variant">
            <div className="capitalize">{name}</div>
          </div>
          <div className="flex flex-row items-start justify-end gap-5 md:gap-4">
            <Tooltip>
              <TooltipTrigger>
                <LuTrash2
                  className="size-5 cursor-pointer text-primary-variant hover:text-primary"
                  onClick={onDelete}
                />
              </TooltipTrigger>
              <TooltipContent>Delete Face Attempt</TooltipContent>
            </Tooltip>
          </div>
        </div>
      )}
      <img
        className="h-40 rounded-md"
        src={`${baseUrl}clips/faces/${name}/${image}`}
      />
    </div>
  </div>
);
}
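The hunk above replaces the hover-to-reveal `Chip` with an always-visible card layout, while keeping the same axios delete-and-refresh flow. A minimal standalone sketch of that flow, assuming the `/faces/:name/delete` endpoint shown in the diff; the hook name `useDeleteFaceImage` and the error branch are illustrative, not part of the PR:

```ts
// Sketch of the delete handler pattern used by FaceImage, under the
// assumptions above. The endpoint and payload shape come from the diff.
import { useCallback } from "react";
import axios from "axios";

export function useDeleteFaceImage(
  name: string,
  image: string,
  onRefresh: () => void,
) {
  return useCallback(() => {
    axios
      .post(`/faces/${name}/delete`, { ids: [image] })
      .then((resp) => {
        if (resp.status === 200) {
          onRefresh(); // re-fetch the face list so the grid updates
        }
      })
      .catch(() => {
        // surface the failure however the app prefers (e.g. a toast)
      });
  }, [name, image, onRefresh]);
}
```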
@@ -40,7 +40,7 @@ import UiSettingsView from "@/views/settings/UiSettingsView";

const allSettingsViews = [
  "UI settings",
  "search settings",
  "explore settings",
  "camera settings",
  "masks / zones",
  "motion tuner",
@@ -175,7 +175,7 @@ export default function Settings() {
      </div>
      <div className="mt-2 flex h-full w-full flex-col items-start md:h-dvh md:pb-24">
        {page == "UI settings" && <UiSettingsView />}
        {page == "search settings" && (
        {page == "explore settings" && (
          <SearchSettingsView setUnsavedChanges={setUnsavedChanges} />
        )}
        {page == "debug" && (
@@ -1,12 +1,12 @@
import useSWR from "swr";
import { FrigateStats } from "@/types/stats";
import { useEffect, useState } from "react";
import { useEffect, useMemo, useState } from "react";
import TimeAgo from "@/components/dynamic/TimeAgo";
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
import { isDesktop, isMobile } from "react-device-detect";
import GeneralMetrics from "@/views/system/GeneralMetrics";
import StorageMetrics from "@/views/system/StorageMetrics";
import { LuActivity, LuHardDrive } from "react-icons/lu";
import { LuActivity, LuHardDrive, LuSearchCode } from "react-icons/lu";
import { FaVideo } from "react-icons/fa";
import Logo from "@/components/Logo";
import useOptimisticState from "@/hooks/use-optimistic-state";
@@ -14,11 +14,28 @@ import CameraMetrics from "@/views/system/CameraMetrics";
import { useHashState } from "@/hooks/use-overlay-state";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { Toaster } from "@/components/ui/sonner";
import { FrigateConfig } from "@/types/frigateConfig";
import FeatureMetrics from "@/views/system/FeatureMetrics";

const metrics = ["general", "storage", "cameras"] as const;
type SystemMetric = (typeof metrics)[number];
const allMetrics = ["general", "features", "storage", "cameras"] as const;
type SystemMetric = (typeof allMetrics)[number];

function System() {
  const { data: config } = useSWR<FrigateConfig>("config", {
    revalidateOnFocus: false,
  });

  const metrics = useMemo(() => {
    const metrics = [...allMetrics];

    if (!config?.semantic_search.enabled) {
      const index = metrics.indexOf("features");
      metrics.splice(index, 1);
    }

    return metrics;
  }, [config]);

  // stats page

  const [page, setPage] = useHashState<SystemMetric>();
@@ -67,6 +84,7 @@ function System() {
            aria-label={`Select ${item}`}
          >
            {item == "general" && <LuActivity className="size-4" />}
            {item == "features" && <LuSearchCode className="size-4" />}
            {item == "storage" && <LuHardDrive className="size-4" />}
            {item == "cameras" && <FaVideo className="size-4" />}
            {isDesktop && <div className="capitalize">{item}</div>}
@@ -96,6 +114,12 @@ function System() {
          setLastUpdated={setLastUpdated}
        />
      )}
      {page == "features" && (
        <FeatureMetrics
          lastUpdated={lastUpdated}
          setLastUpdated={setLastUpdated}
        />
      )}
      {page == "storage" && <StorageMetrics setLastUpdated={setLastUpdated} />}
      {page == "cameras" && (
        <CameraMetrics
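The `useMemo` block above derives the visible metric tabs from the runtime config instead of hardcoding them. A self-contained sketch of the same pattern, using the `allMetrics` tuple and `semantic_search` flag from the diff; the helper name and the `-1` guard are additions for illustration:

```ts
// Derive visible tabs from a readonly master list plus a config flag.
const allMetrics = ["general", "features", "storage", "cameras"] as const;
type SystemMetric = (typeof allMetrics)[number];

function visibleMetrics(semanticSearchEnabled: boolean): SystemMetric[] {
  // spread the readonly tuple into a mutable copy
  const metrics: SystemMetric[] = [...allMetrics];
  if (!semanticSearchEnabled) {
    // hide the "features" tab when semantic search is off
    const index = metrics.indexOf("features");
    if (index !== -1) {
      metrics.splice(index, 1);
    }
  }
  return metrics;
}

// visibleMetrics(false) -> ["general", "storage", "cameras"]
```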
@@ -290,6 +290,7 @@ export interface FrigateConfig {

  face_recognition: {
    enabled: boolean;
    threshold: number;
  };

  ffmpeg: {
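Adding `face_recognition` to the `FrigateConfig` interface lets components read the feature's settings in a type-safe way. A hedged consumer sketch, reusing the `useSWR("config")` pattern from the System.tsx hunk above; the hook itself is hypothetical, not part of this PR:

```ts
// Hypothetical consumer of the new face_recognition config fields.
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";

export function useFaceRecognitionThreshold(): number | undefined {
  const { data: config } = useSWR<FrigateConfig>("config", {
    revalidateOnFocus: false,
  });

  if (!config?.face_recognition.enabled) {
    return undefined; // feature disabled, or config not loaded yet
  }
  return config.face_recognition.threshold;
}
```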