Compare commits


3 Commits

Author         SHA1        Message                                        Date
Josh Hawkins   b0c4c77cfd  update docs                                    2024-10-21 09:36:16 -05:00
Josh Hawkins   059475e6bb  add try/except around ollama initialization    2024-10-21 09:31:34 -05:00
Josh Hawkins   8002e59031  disable mem arena in options for cpu only      2024-10-21 09:31:11 -05:00
198 changed files with 2572 additions and 5310 deletions

View File

@@ -12,7 +12,6 @@ argmax
argmin
argpartition
ascontiguousarray
astype
authelia
authentik
autodetected
@@ -43,7 +42,6 @@ codeproject
colormap
colorspace
comms
coro
ctypeslib
CUDA
Cuvid
@@ -61,7 +59,6 @@ dsize
dtype
ECONNRESET
edgetpu
fastapi
faststart
fflags
ffprobe
@@ -196,7 +193,6 @@ poweroff
preexec
probesize
protobuf
pstate
psutil
pubkey
putenv
@@ -241,7 +237,6 @@ sleeptime
SNDMORE
socs
sqliteq
sqlitevecq
ssdlite
statm
stimeout
@@ -276,11 +271,9 @@ unraid
unreviewed
userdata
usermod
uvicorn
vaapi
vainfo
variations
vbios
vconcat
vitb
vstream

View File

@@ -3,12 +3,10 @@
set -euxo pipefail
# Cleanup the old github host key
if [[ -f ~/.ssh/known_hosts ]]; then
# Add new github host key
sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
fi
sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
# Add new github host key
curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
# Frigate normal container runs as root, so it has permission to create
# the folders. But the devcontainer runs as the host user, so we need to

View File

@@ -13,7 +13,6 @@
- [ ] New feature
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code
- [ ] Documentation Update
## Additional information

View File

@@ -6,8 +6,6 @@ on:
branches:
- dev
- master
paths-ignore:
- 'docs/**'
# only run the latest commit to avoid cache overwrites
concurrency:

View File

@@ -1,9 +1,6 @@
name: On pull request
on:
pull_request:
paths-ignore:
- 'docs/**'
on: pull_request
env:
DEFAULT_PYTHON: 3.9

View File

@@ -34,14 +34,14 @@ jobs:
STABLE_TAG=${BASE}:stable
PULL_TAG=${BASE}:${BUILD_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
done
# stable tag
if [[ "${BUILD_TYPE}" == "stable" ]]; then
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
done
fi

View File

@@ -23,7 +23,7 @@ services:
# count: 1
# capabilities: [gpu]
environment:
YOLO_MODELS: ""
YOLO_MODELS: yolov7-320
devices:
- /dev/bus/usb:/dev/bus/usb
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware

View File

@@ -16,25 +16,89 @@ RUN mkdir /h8l-wheels
# Build the wheels
RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt
FROM wget AS hailort
# Build HailoRT and create wheel
FROM wheels AS build-hailort
ARG TARGETARCH
RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
/deps/install_hailort.sh
SHELL ["/bin/bash", "-c"]
# Install necessary APT packages
RUN apt-get -qq update \
&& apt-get -qq install -y \
apt-transport-https \
gnupg \
wget \
# the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
&& wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
&& apt-get -qq update \
&& apt-get -qq install -y \
python3.9 \
python3.9-dev \
build-essential cmake git \
&& rm -rf /var/lib/apt/lists/*
# Extract Python version and set environment variables
RUN PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2) && \
PYTHON_VERSION_NO_DOT=$(echo $PYTHON_VERSION | sed 's/\.//') && \
echo "PYTHON_VERSION=$PYTHON_VERSION" > /etc/environment && \
echo "PYTHON_VERSION_NO_DOT=$PYTHON_VERSION_NO_DOT" >> /etc/environment
# Clone and build HailoRT
RUN . /etc/environment && \
git clone https://github.com/hailo-ai/hailort.git /opt/hailort && \
cd /opt/hailort && \
git checkout v4.18.0 && \
cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_PYBIND=1 -DPYBIND11_PYTHON_VERSION=${PYTHON_VERSION} && \
cmake --build build --config release --target libhailort && \
cmake --build build --config release --target _pyhailort && \
cp build/hailort/libhailort/bindings/python/src/_pyhailort.cpython-${PYTHON_VERSION_NO_DOT}-$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ && \
cp build/hailort/libhailort/src/libhailort.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
RUN ls -ahl /opt/hailort/build/hailort/libhailort/src/
RUN ls -ahl /opt/hailort/hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
# Remove the existing setup.py if it exists in the target directory
RUN rm -f /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
# Copy generate_wheel_conf.py and setup.py
COPY docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
COPY docker/hailo8l/pyhailort_build_scripts/setup.py /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
# Run the generate_wheel_conf.py script
RUN python3 /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
# Create a wheel file using pip3 wheel
RUN cd /opt/hailort/hailort/libhailort/bindings/python/platform && \
python3 setup.py bdist_wheel --dist-dir /hailo-wheels
# Use deps as the base image
FROM deps AS h8l-frigate
# Copy the wheels from the wheels stage
COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /
COPY --from=build-hailort /hailo-wheels /deps/hailo-wheels
COPY --from=build-hailort /etc/environment /etc/environment
RUN CC=$(python3 -c "import sysconfig; import shlex; cc = sysconfig.get_config_var('CC'); cc_cmd = shlex.split(cc)[0]; print(cc_cmd[:-4] if cc_cmd.endswith('-gcc') else cc_cmd)") && \
echo "CC=$CC" >> /etc/environment
# Install the wheels
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl
RUN . /etc/environment && \
mv /usr/local/lib/python${PYTHON_VERSION}/dist-packages/hailo_platform/pyhailort/libhailort.so /usr/lib/${CC} && \
cd /usr/lib/${CC}/ && \
ln -s libhailort.so libhailort.so.4.18.0
# Copy base files from the rootfs stage
COPY --from=rootfs / /
# Set environment variables for Hailo SDK
ENV PATH="/opt/hailort/bin:${PATH}"
ENV LD_LIBRARY_PATH="/usr/lib/$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu:${LD_LIBRARY_PATH}"
# Set workdir
WORKDIR /opt/frigate/

View File

@@ -1,9 +1,3 @@
target wget {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
target = "wget"
}
target wheels {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
@@ -25,7 +19,6 @@ target rootfs {
target h8l {
dockerfile = "docker/hailo8l/Dockerfile"
contexts = {
wget = "target:wget"
wheels = "target:wheels"
deps = "target:deps"
rootfs = "target:rootfs"

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -euxo pipefail
hailo_version="4.19.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
arch="aarch64"
fi
wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
tar -C / -xzf -
mkdir -p /hailo-wheels
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"

View File

@@ -0,0 +1,67 @@
import json
import os
import platform
import sys
import sysconfig
def extract_toolchain_info(compiler):
# Remove the "-gcc" or "-g++" suffix if present
if compiler.endswith("-gcc") or compiler.endswith("-g++"):
compiler = compiler.rsplit("-", 1)[0]
# Extract the toolchain and ABI part (e.g., "gnu")
toolchain_parts = compiler.split("-")
abi_conventions = next(
(part for part in toolchain_parts if part in ["gnu", "musl", "eabi", "uclibc"]),
"",
)
return abi_conventions
def generate_wheel_conf():
conf_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
)
# Extract current system and Python version information
py_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
arch = platform.machine()
system = platform.system().lower()
libc_version = platform.libc_ver()[1]
# Get the compiler information
compiler = sysconfig.get_config_var("CC")
abi_conventions = extract_toolchain_info(compiler)
# Create the new configuration data
new_conf_data = {
"py_version": py_version,
"arch": arch,
"system": system,
"libc_version": libc_version,
"abi": abi_conventions,
"extension": {
"posix": "so",
"nt": "pyd", # Windows
}[os.name],
}
# If the file exists, load the existing data
if os.path.isfile(conf_file_path):
with open(conf_file_path, "r") as conf_file:
conf_data = json.load(conf_file)
# Update the existing data with the new data
conf_data.update(new_conf_data)
else:
# If the file does not exist, use the new data
conf_data = new_conf_data
# Write the updated data to the file
with open(conf_file_path, "w") as conf_file:
json.dump(conf_data, conf_file, indent=4)
if __name__ == "__main__":
generate_wheel_conf()

View File

@@ -0,0 +1,111 @@
import json
import os
from setuptools import find_packages, setup
from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
class NonPurePythonBDistWheel(orig_bdist_wheel):
"""Makes the wheel platform-dependent so it can be based on the _pyhailort architecture"""
def finalize_options(self):
orig_bdist_wheel.finalize_options(self)
self.root_is_pure = False
def _get_hailort_lib_path():
lib_filename = "libhailort.so"
lib_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
f"hailo_platform/pyhailort/{lib_filename}",
)
if os.path.exists(lib_path):
print(f"Found libhailort shared library at: {lib_path}")
else:
print(f"Error: libhailort shared library not found at: {lib_path}")
raise FileNotFoundError(f"libhailort shared library not found at: {lib_path}")
return lib_path
def _get_pyhailort_lib_path():
conf_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
)
if not os.path.isfile(conf_file_path):
raise FileNotFoundError(f"Configuration file not found: {conf_file_path}")
with open(conf_file_path, "r") as conf_file:
content = json.load(conf_file)
py_version = content["py_version"]
arch = content["arch"]
system = content["system"]
extension = content["extension"]
abi = content["abi"]
# Construct the filename directly
lib_filename = f"_pyhailort.cpython-{py_version.split('cp')[1]}-{arch}-{system}-{abi}.{extension}"
lib_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
f"hailo_platform/pyhailort/{lib_filename}",
)
if os.path.exists(lib_path):
print(f"Found _pyhailort shared library at: {lib_path}")
else:
print(f"Error: _pyhailort shared library not found at: {lib_path}")
raise FileNotFoundError(
f"_pyhailort shared library not found at: {lib_path}"
)
return lib_path
def _get_package_paths():
packages = []
pyhailort_lib = _get_pyhailort_lib_path()
hailort_lib = _get_hailort_lib_path()
if pyhailort_lib:
packages.append(pyhailort_lib)
if hailort_lib:
packages.append(hailort_lib)
packages.append(os.path.abspath("hailo_tutorials/notebooks/*"))
packages.append(os.path.abspath("hailo_tutorials/hefs/*"))
return packages
if __name__ == "__main__":
setup(
author="Hailo team",
author_email="contact@hailo.ai",
cmdclass={
"bdist_wheel": NonPurePythonBDistWheel,
},
description="HailoRT",
entry_points={
"console_scripts": [
"hailo=hailo_platform.tools.hailocli.main:main",
]
},
install_requires=[
"argcomplete",
"contextlib2",
"future",
"netaddr",
"netifaces",
"verboselogs",
"numpy==1.23.3",
],
name="hailort",
package_data={
"hailo_platform": _get_package_paths(),
},
packages=find_packages(),
platforms=[
"linux_x86_64",
"linux_aarch64",
"win_amd64",
],
url="https://hailo.ai/",
version="4.17.0",
zip_safe=False,
)

View File

@@ -13,7 +13,7 @@ else
fi
# Clone the HailoRT driver repository
git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
git clone --depth 1 --branch v4.18.0 https://github.com/hailo-ai/hailort-drivers.git
# Build and install the HailoRT driver
cd hailort-drivers/linux/pcie
@@ -38,7 +38,7 @@ cd ../../
if [ ! -d /lib/firmware/hailo ]; then
sudo mkdir /lib/firmware/hailo
fi
sudo mv hailo8_fw.*.bin /lib/firmware/hailo/hailo8_fw.bin
sudo mv hailo8_fw.4.17.0.bin /lib/firmware/hailo/hailo8_fw.bin
# Install udev rules
sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/

View File

@@ -211,9 +211,6 @@ ENV TOKENIZERS_PARALLELISM=true
# https://github.com/huggingface/transformers/issues/27214
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
ENV LIBAVFORMAT_VERSION_MAJOR=60

View File

@@ -87,8 +87,8 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
libmfx1 libmfxgen1 libvpl2
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list

View File

@@ -1,11 +1,9 @@
click == 8.1.*
# FastAPI
aiohttp == 3.11.2
starlette == 0.41.2
starlette-context == 0.3.6
fastapi == 0.115.*
fastapi == 0.115.0
uvicorn == 0.30.*
slowapi == 0.1.*
slowapi == 0.1.9
imutils == 0.5.*
joserfc == 1.0.*
pathvalidate == 3.2.*
@@ -18,10 +16,10 @@ paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
peewee_migrate == 1.13.*
psutil == 6.1.*
psutil == 5.9.*
pydantic == 2.8.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
pytz == 2024.*
pytz == 2024.1
pyzmq == 26.2.*
ruamel.yaml == 0.18.*
tzlocal == 5.2

View File

@@ -165,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False):
birdseye: dict[str, any] = config.get("birdseye")
input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
if go2rtc_config.get("streams"):
go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd

View File

@@ -10,8 +10,8 @@ ARG DEBIAN_FRONTEND
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends \
python3.9 python3.9-dev \
wget build-essential cmake git \
python3.9 python3.9-dev \
wget build-essential cmake git \
&& rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
@@ -41,11 +41,7 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND

View File

@@ -25,7 +25,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS=""
ENV YOLO_MODELS="yolov7-320"
HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1

View File

@@ -11,7 +11,6 @@ set -o errexit -o nounset -o pipefail
MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
YOLO_MODELS=${YOLO_MODELS:-""}
# Create output folder
mkdir -p ${OUTPUT_FOLDER}
@@ -20,11 +19,6 @@ FIRST_MODEL=true
MODEL_DOWNLOAD=""
MODEL_CONVERT=""
if [ -z "$YOLO_MODELS" ]; then
echo "tensorrt model preparation disabled"
exit 0
fi
for model in ${YOLO_MODELS//,/ }
do
# Remove old link in case path/version changed

View File

@@ -9,6 +9,6 @@ nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@@ -1 +1 @@
cuda-python == 11.7; platform_machine == 'aarch64'
cuda-python == 11.7; platform_machine == 'aarch64'

View File

@@ -181,7 +181,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.
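A minimal sketch of one way to handle this, assuming the documented `preset-record-ubiquiti` output-args preset (the camera name is a placeholder; verify the preset name against the ffmpeg presets docs):

```yaml
cameras:
  front_door: # placeholder camera name
    ffmpeg:
      output_args:
        # resamples audio so Unifi Protect recordings play back correctly
        record: preset-record-ubiquiti
```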

View File

@@ -109,7 +109,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
| Reolink E1 Zoom | ✅ | ❌ | |
| Reolink RLC-823A 16x | ✅ | ❌ | |
| Speco O8P32X | ✅ | ❌ | |
| Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on original and 4K models. All models are suspected incompatible. |
| Sunba 405-D20X | ✅ | ❌ | |
| Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
| Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |

View File

@@ -3,13 +3,9 @@ id: genai
title: Generative AI
---
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects.
:::info
Semantic Search must be enabled to use Generative AI.
:::
Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
## Configuration
@@ -33,21 +29,15 @@ cameras:
## Ollama
:::warning
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance. CPU inference is not recommended.
Using Ollama on CPU is not recommended; high inference times make using Generative AI impractical.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [docker container](https://hub.docker.com/r/ollama/ollama) available.
:::
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
Parallel requests also come with some caveats. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
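As an illustration only, those variables might be set on a self-hosted Ollama container like this (the image tag and values are assumptions; adjust for your hardware):

```yaml
services:
  ollama:
    image: ollama/ollama:latest # assumed image tag
    environment:
      OLLAMA_NUM_PARALLEL: "1" # process one request at a time
      OLLAMA_MAX_QUEUE: "64" # queue further requests instead of rejecting them
      OLLAMA_MAX_LOADED_MODELS: "1" # keep a single model resident to conserve VRAM
```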
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config; you must download the model to your local instance of Ollama first, e.g. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config; you must download the model to your local instance of Ollama first.
:::note
@@ -62,7 +52,7 @@ genai:
enabled: True
provider: ollama
base_url: http://localhost:11434
model: llava:7b
model: llava
```
## Google Gemini
@@ -142,10 +132,6 @@ Frigate's thumbnail search excels at identifying specific details about tracked
While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context.
### Using GenAI for notifications
Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
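As an illustration only, a Home Assistant automation consuming that topic might look like the following sketch (the notify service name is hypothetical):

```yaml
automation:
  - alias: Notify on tracked object description
    trigger:
      - platform: mqtt
        topic: frigate/tracked_object_update
    action:
      - service: notify.mobile_app_phone # hypothetical notify target
        data:
          # the payload contains "type", "id", and "description"
          message: "{{ trigger.payload_json['description'] }}"
```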
## Custom Prompts
Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
@@ -176,7 +162,7 @@ genai:
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction.
```yaml
cameras:

View File

@@ -92,16 +92,10 @@ motion:
lightning_threshold: 0.8
```
:::warning
:::tip
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
:::
:::note
Lightning threshold does not stop motion based recordings from being saved.
:::
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in no motion detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

View File

@@ -22,14 +22,14 @@ Frigate supports multiple different detectors that work on different types of ha
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
**Nvidia**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
:::
@@ -223,7 +223,7 @@ The model used for TensorRT must be preprocessed on the same hardware platform t
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
@@ -264,7 +264,7 @@ An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yol
```yml
frigate:
environment:
- YOLO_MODELS=yolov7-320,yolov7x-640
- YOLO_MODELS=yolov4-608,yolov7x-640
- USE_FP16=false
```
@@ -415,24 +415,6 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
ONNX is an open format for building machine learning models. Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.
:::info
If the correct build is used for your GPU then the GPU will be detected and used automatically.
- **AMD**
- ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
:::
:::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
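A sketch of such a configuration, assuming two ONNX detectors sharing the available GPU (the detector names are arbitrary):

```yaml
detectors:
  onnx_0: # arbitrary detector name
    type: onnx
  onnx_1:
    type: onnx
```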
@@ -475,7 +457,6 @@ model:
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
input_tensor: nchw
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```

View File

@@ -5,7 +5,7 @@ title: Available Objects
import labels from "../../../labelmap.txt";
Frigate includes the object labels listed below from the Google Coral test data.
Frigate includes the object models listed below from the Google Coral test data.
Please note:

View File

@@ -548,12 +548,10 @@ genai:
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
go2rtc:
# Optional: Live stream configuration for WebUI.
# NOTE: Can be overridden at the camera level
# Optional: jsmpeg stream configuration for WebUI
live:
# Optional: Set the name of the stream configured in go2rtc
# that should be used for live view in frigate WebUI. (default: name of camera)
# NOTE: In most cases this should be set at the camera level only.
# Optional: Set the name of the stream that should be used for live view
# in frigate WebUI. (default: name of camera)
stream_name: camera_name
# Optional: Set the height of the jsmpeg stream. (default: 720)
# This must be less than or equal to the height of the detect stream. Lower resolutions

View File

@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration) for more advanced configurations and features.
:::note
@@ -134,7 +134,7 @@ cameras:
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}`
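As an illustration, a hedged sketch of an exec source that re-encodes a local file into the restream (the input path is a placeholder):

```yaml
go2rtc:
  streams:
    # ffmpeg reads the placeholder file on loop and publishes it to go2rtc via RTSP
    stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/sample.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
```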

View File

@@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
## Configuration
Semantic Search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
Semantic search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
```yaml
semantic_search:
@@ -41,7 +41,13 @@ The vision model is able to embed both images and text into the same vector spac
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
Differently weighted CLIP models are available and can be selected by setting the `model_size` config option as `small` or `large`:
Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:
:::tip
The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
:::
```yaml
semantic_search:
@@ -50,41 +56,11 @@ semantic_search:
```
- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.
```yaml
semantic_search:
enabled: True
model_size: large
```
:::info
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
- **AMD**
- ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
:::
- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
## Usage and Best Practices
1. Semantic Search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and Semantic Search for the best results.
1. Semantic search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and semantic search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".

View File

@@ -81,15 +81,15 @@ You can calculate the **minimum** shm size for each camera with the following fo
```console
# Replace <width> and <height>
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 10 + 270480) / 1048576))'
# Example for 1280x720, including logs
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576 + 40))'
66.63MB
# Example for 1280x720
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 10 + 270480) / 1048576))'
13.44MB
# Example for eight cameras detecting at 1280x720, including logs
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
253.00MB
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 10 + 270480) / 1048576) * 8 + 40))'
136.99MB
```
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
@@ -193,9 +193,8 @@ services:
container_name: frigate
privileged: true # this may not be necessary for all setups
restart: unless-stopped
stop_grace_period: 30s # allow enough time to shut down the various services
image: ghcr.io/blakeblackshear/frigate:stable
shm_size: "512mb" # update for your cameras based on calculation above
shm_size: "64mb" # update for your cameras based on calculation above
devices:
- /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
@@ -225,7 +224,6 @@ If you can't use docker compose, you can run the container with something simila
docker run -d \
--name frigate \
--restart=unless-stopped \
--stop-timeout 30 \
--mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
--device /dev/bus/usb:/dev/bus/usb \
--device /dev/dri/renderD128 \

View File

@@ -13,15 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
# Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.
:::tip
For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera.
See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information.
:::
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. For the best experience, you should set the stream name under go2rtc to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp.
```yaml
go2rtc:
@@ -47,8 +39,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml
go2rtc:
streams:

View File

@@ -115,7 +115,6 @@ services:
frigate:
container_name: frigate
restart: unless-stopped
stop_grace_period: 30s
image: ghcr.io/blakeblackshear/frigate:stable
volumes:
- ./config:/config
@@ -307,9 +306,7 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
### Step 7: Complete config
At this point you have a complete config with basic functionality.
- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples.
- View [full config reference](../configuration/reference.md) for a complete list of configuration options.
At this point you have a complete config with basic functionality. You can see the [full config reference](../configuration/reference.md) for a complete list of configuration options.
### Follow up

View File

@@ -94,18 +94,6 @@ Message published for each changed tracked object. The first message is publishe
}
```
### `frigate/tracked_object_update`
Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
```json
{
"type": "description",
"id": "1607123955.475377-mxklsc",
"description": "The car is a red sedan moving away from the camera."
}
```
### `frigate/reviews`
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended, a final `end` message is published.

View File

@@ -5,7 +5,7 @@ title: Requesting your first model
## Step 1: Upload and annotate your images
Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
Before requesting your first model, you will need to upload at least 10 images to Frigate+. But for the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
@@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your
## Step 2: Submit a model request
Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
Once you have an initial set of verified images, you can request a model on the Models page. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
![Plus Models Page](/img/plus/plus-models.jpg)
## Step 3: Set your model id in the config
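A sketch of the config this step refers to (the model id shown is hypothetical; copy yours from the Frigate+ Models page):

```yaml
model:
  # hypothetical id - use the one shown for your trained model on Frigate+
  path: plus://e63b7345cc83a84ed79dde37e1f20f29
```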

View File

@@ -3,7 +3,7 @@ id: improving_model
title: Improving your model
---
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. Because a limited number of users submitted images to Frigate+ prior to this launch, you may need to submit several hundred images per camera to see good results. With all the new images now being submitted, future base models will improve as more and more users (including you) submit examples to Frigate+. Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
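As an illustrative sketch of the tip above, a per-object threshold can be lowered in Frigate's config like this (the label and value are examples only):

```yaml
objects:
  filters:
    person:
      threshold: 0.65 # temporarily lowered to surface borderline detections
```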
@@ -36,17 +36,18 @@ Misidentified objects should have a correct label added. For example, if a perso
## Shortcuts for a faster workflow
| Shortcut Key | Description |
| ----------------- | ----------------------------- |
| `?` | Show all keyboard shortcuts |
| `w` | Add box |
| `d` | Toggle difficult |
| `s` | Switch to the next label |
| `tab` | Select next largest box |
| `del` | Delete current box |
| `esc` | Deselect/Cancel |
| `← ↑ → ↓` | Move box |
| `Shift + ← ↑ → ↓` | Resize box |
| `scrollwheel` | Zoom in/out |
| `f` | Hide/show all but current box |
| `spacebar` | Verify and save |
|Shortcut Key|Description|
|-----|--------|
|`?`|Show all keyboard shortcuts|
|`w`|Add box|
|`d`|Toggle difficult|
|`s`|Switch to the next label|
|`tab`|Select next largest box|
|`del`|Delete current box|
|`esc`|Deselect/Cancel|
|`← ↑ → ↓`|Move box|
|`Shift + ← ↑ → ↓`|Resize box|
|`-`|Zoom out|
|`=`|Zoom in|
|`f`|Hide/show all but current box|
|`spacebar`|Verify and save|

View File

@@ -15,36 +15,17 @@ With a subscription, 12 model trainings per year are included. If you cancel you
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
## Available model types
There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
| Model Type | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors.
:::warning
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
Frigate+ models are not supported for TensorRT or OpenVino yet.
:::
| Hardware | Recommended Detector Type | Recommended Model Type |
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
Currently, Frigate+ models only support CPU (`cpu`) and Coral (`edgetpu`) detectors. OpenVino is next in line to gain support.
_\* Requires Frigate 0.15_
The models are created using the same MobileDet architecture as the default model. Additional architectures will be added in future releases as needed.
## Available label types

View File

@@ -49,10 +49,7 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n
## PCIe Coral Not Detected
The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on which OS and kernel are being run.
- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
The most common reason for the PCIe coral not being detected is that the driver has not been installed. See [the coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) for how to install the driver for the PCIe based coral.
## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU

View File

@@ -26,7 +26,7 @@ const sidebars: SidebarsConfig = {
{
type: 'link',
label: 'Go2RTC Configuration Reference',
href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration',
} as PropSidebarItemLink,
],
Detectors: [
View File
@@ -7,7 +7,7 @@ info:
servers:
- url: https://demo.frigate.video/api
- url: http://localhost:5001/api
- url: http://localhost:5001/
paths:
/auth:
@@ -172,65 +172,76 @@ paths:
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Cameras
- name: labels
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Labels
- name: zones
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Zones
- name: reviewed
in: query
required: false
schema:
type: integer
anyOf:
- type: integer
- type: 'null'
default: 0
title: Reviewed
- name: limit
in: query
required: false
schema:
type: integer
anyOf:
- type: integer
- type: 'null'
title: Limit
- name: severity
in: query
required: false
schema:
allOf:
- $ref: '#/components/schemas/SeverityEnum'
anyOf:
- type: string
- type: 'null'
title: Severity
- name: before
in: query
required: false
schema:
type: number
anyOf:
- type: number
- type: 'null'
title: Before
- name: after
in: query
required: false
schema:
type: number
anyOf:
- type: number
- type: 'null'
title: After
responses:
'200':
description: Successful Response
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ReviewSegmentResponse'
title: Response Review Review Get
schema: { }
'422':
description: Validation Error
content:
@@ -248,28 +259,36 @@ paths:
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Cameras
- name: labels
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Labels
- name: zones
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Zones
- name: timezone
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: utc
title: Timezone
responses:
@@ -277,8 +296,7 @@ paths:
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/ReviewSummaryResponse'
schema: { }
'422':
description: Validation Error
content:
@@ -292,18 +310,17 @@ paths:
summary: Set Multiple Reviewed
operationId: set_multiple_reviewed_reviews_viewed_post
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/ReviewModifyMultipleBody'
type: object
title: Body
responses:
'200':
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/GenericResponse'
schema: { }
'422':
description: Validation Error
content:
@@ -317,18 +334,17 @@ paths:
summary: Delete Reviews
operationId: delete_reviews_reviews_delete_post
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/ReviewModifyMultipleBody'
type: object
title: Body
responses:
'200':
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/GenericResponse'
schema: { }
'422':
description: Validation Error
content:
@@ -347,38 +363,96 @@ paths:
in: query
required: false
schema:
type: string
anyOf:
- type: string
- type: 'null'
default: all
title: Cameras
- name: before
in: query
required: false
schema:
type: number
anyOf:
- type: number
- type: 'null'
title: Before
- name: after
in: query
required: false
schema:
type: number
anyOf:
- type: number
- type: 'null'
title: After
- name: scale
in: query
required: false
schema:
type: integer
anyOf:
- type: integer
- type: 'null'
default: 30
title: Scale
responses:
'200':
description: Successful Response
content:
application/json:
schema: { }
'422':
description: Validation Error
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ReviewActivityMotionResponse'
title: Response Motion Activity Review Activity Motion Get
$ref: '#/components/schemas/HTTPValidationError'
/review/activity/audio:
get:
tags:
- Review
summary: Audio Activity
description: Get motion and audio activity.
operationId: audio_activity_review_activity_audio_get
parameters:
- name: cameras
in: query
required: false
schema:
anyOf:
- type: string
- type: 'null'
default: all
title: Cameras
- name: before
in: query
required: false
schema:
anyOf:
- type: number
- type: 'null'
title: Before
- name: after
in: query
required: false
schema:
anyOf:
- type: number
- type: 'null'
title: After
- name: scale
in: query
required: false
schema:
anyOf:
- type: integer
- type: 'null'
default: 30
title: Scale
responses:
'200':
description: Successful Response
content:
application/json:
schema: { }
'422':
description: Validation Error
content:
@@ -403,60 +477,57 @@ paths:
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/ReviewSegmentResponse'
schema: { }
'422':
description: Validation Error
content:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
/review/{review_id}:
/review/{event_id}:
get:
tags:
- Review
summary: Get Review
operationId: get_review_review__review_id__get
operationId: get_review_review__event_id__get
parameters:
- name: review_id
- name: event_id
in: path
required: true
schema:
type: string
title: Review Id
title: Event Id
responses:
'200':
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/ReviewSegmentResponse'
schema: { }
'422':
description: Validation Error
content:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
/review/{review_id}/viewed:
/review/{event_id}/viewed:
delete:
tags:
- Review
summary: Set Not Reviewed
operationId: set_not_reviewed_review__review_id__viewed_delete
operationId: set_not_reviewed_review__event_id__viewed_delete
parameters:
- name: review_id
- name: event_id
in: path
required: true
schema:
type: string
title: Review Id
title: Event Id
responses:
'200':
description: Successful Response
content:
application/json:
schema:
$ref: '#/components/schemas/GenericResponse'
schema: { }
'422':
description: Validation Error
content:
@@ -692,25 +763,13 @@ paths:
content:
application/json:
schema: { }
/nvinfo:
get:
tags:
- App
summary: Nvinfo
operationId: nvinfo_nvinfo_get
responses:
'200':
description: Successful Response
content:
application/json:
schema: { }
/logs/{service}:
get:
tags:
- App
- Logs
summary: Logs
description: Get logs for the requested service (frigate/nginx/go2rtc)
description: Get logs for the requested service (frigate/nginx/go2rtc/chroma)
operationId: logs_logs__service__get
parameters:
- name: service
@@ -722,6 +781,7 @@ paths:
- frigate
- nginx
- go2rtc
- chroma
title: Service
- name: download
in: query
@@ -982,8 +1042,7 @@ paths:
- Preview
summary: Preview Hour
description: Get all mp4 previews relevant for time period given the timezone
operationId: >-
preview_hour_preview__year_month___day___hour___camera_name___tz_name__get
operationId: preview_hour_preview__year_month___day___hour___camera_name___tz_name__get
parameters:
- name: year_month
in: path
@@ -1033,8 +1092,7 @@ paths:
- Preview
summary: Get Preview Frames From Cache
description: Get list of cached preview frames
operationId: >-
get_preview_frames_from_cache_preview__camera_name__start__start_ts__end__end_ts__frames_get
operationId: get_preview_frames_from_cache_preview__camera_name__start__start_ts__end__end_ts__frames_get
parameters:
- name: camera_name
in: path
@@ -1119,8 +1177,7 @@ paths:
tags:
- Export
summary: Export Recording
operationId: >-
export_recording_export__camera_name__start__start_time__end__end_time__post
operationId: export_recording_export__camera_name__start__start_time__end__end_time__post
parameters:
- name: camera_name
in: path
@@ -1141,11 +1198,11 @@ paths:
type: number
title: End Time
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/ExportRecordingsBody'
type: object
title: Body
responses:
'200':
description: Successful Response
@@ -1408,14 +1465,6 @@ paths:
- type: number
- type: 'null'
title: Max Length
- name: event_id
in: query
required: false
schema:
anyOf:
- type: string
- type: 'null'
title: Event Id
- name: sort
in: query
required: false
@@ -1526,7 +1575,7 @@ paths:
anyOf:
- type: string
- type: 'null'
default: thumbnail
default: thumbnail,description
title: Search Type
- name: include_thumbnails
in: query
@@ -1598,22 +1647,6 @@ paths:
- type: 'null'
default: 00:00,24:00
title: Time Range
- name: has_clip
in: query
required: false
schema:
anyOf:
- type: boolean
- type: 'null'
title: Has Clip
- name: has_snapshot
in: query
required: false
schema:
anyOf:
- type: boolean
- type: 'null'
title: Has Snapshot
- name: timezone
in: query
required: false
@@ -1623,30 +1656,6 @@ paths:
- type: 'null'
default: utc
title: Timezone
- name: min_score
in: query
required: false
schema:
anyOf:
- type: number
- type: 'null'
title: Min Score
- name: max_score
in: query
required: false
schema:
anyOf:
- type: number
- type: 'null'
title: Max Score
- name: sort
in: query
required: false
schema:
anyOf:
- type: string
- type: 'null'
title: Sort
responses:
'200':
description: Successful Response
@@ -1933,15 +1942,6 @@ paths:
schema:
type: string
title: Event Id
- name: source
in: query
required: false
schema:
anyOf:
- $ref: '#/components/schemas/RegenerateDescriptionEnum'
- type: 'null'
default: thumbnails
title: Source
responses:
'200':
description: Successful Response
@@ -2029,12 +2029,12 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
/{camera_name}:
'{camera_name}':
get:
tags:
- Media
summary: Mjpeg Feed
operationId: mjpeg_feed__camera_name__get
operationId: mjpeg_feed_camera_name__get
parameters:
- name: camera_name
in: path
@@ -2241,8 +2241,7 @@ paths:
tags:
- Media
summary: Get Snapshot From Recording
operationId: >-
get_snapshot_from_recording__camera_name__recordings__frame_time__snapshot__format__get
operationId: get_snapshot_from_recording__camera_name__recordings__frame_time__snapshot__format__get
parameters:
- name: camera_name
in: path
@@ -2364,9 +2363,7 @@ paths:
tags:
- Media
summary: Recordings
description: >-
Return specific camera recordings between the given 'after'/'end' times.
If not provided the last hour will be used
description: Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used
operationId: recordings__camera_name__recordings_get
parameters:
- name: camera_name
@@ -2380,14 +2377,14 @@ paths:
required: false
schema:
type: number
default: 1731275308.238304
default: 1727542549.303557
title: After
- name: before
in: query
required: false
schema:
type: number
default: 1731278908.238313
default: 1727546149.303926
title: Before
responses:
'200':
@@ -2426,6 +2423,13 @@ paths:
schema:
type: number
title: End Ts
- name: download
in: query
required: false
schema:
type: boolean
default: false
title: Download
responses:
'200':
description: Successful Response
@@ -2796,6 +2800,13 @@ paths:
schema:
type: string
title: Event Id
- name: download
in: query
required: false
schema:
type: boolean
default: false
title: Download
responses:
'200':
description: Successful Response
@@ -3110,9 +3121,7 @@ paths:
tags:
- Media
summary: Label Snapshot
description: >-
Returns the snapshot image from the latest event for the given camera
and label combo
description: Returns the snapshot image from the latest event for the given camera and label combo
operationId: label_snapshot__camera_name___label__snapshot_jpg_get
parameters:
- name: camera_name
@@ -3184,32 +3193,6 @@ components:
required:
- password
title: AppPutPasswordBody
DayReview:
properties:
day:
type: string
format: date-time
title: Day
reviewed_alert:
type: integer
title: Reviewed Alert
reviewed_detection:
type: integer
title: Reviewed Detection
total_alert:
type: integer
title: Total Alert
total_detection:
type: integer
title: Total Detection
type: object
required:
- day
- reviewed_alert
- reviewed_detection
- total_alert
- total_detection
title: DayReview
EventsCreateBody:
properties:
source_type:
@@ -3225,7 +3208,7 @@ components:
title: Sub Label
score:
anyOf:
- type: number
- type: integer
- type: 'null'
title: Score
default: 0
@@ -3254,6 +3237,7 @@ components:
description:
anyOf:
- type: string
minLength: 1
- type: 'null'
title: The description of the event
type: object
@@ -3264,7 +3248,7 @@ components:
properties:
end_time:
anyOf:
- type: number
- type: integer
- type: 'null'
title: End Time
type: object
@@ -3286,27 +3270,6 @@ components:
required:
- subLabel
title: EventsSubLabelBody
ExportRecordingsBody:
properties:
playback:
allOf:
- $ref: '#/components/schemas/PlaybackFactorEnum'
title: Playback factor
default: realtime
source:
allOf:
- $ref: '#/components/schemas/PlaybackSourceEnum'
title: Playback source
default: recordings
name:
type: string
maxLength: 256
title: Friendly name
image_path:
type: string
title: Image Path
type: object
title: ExportRecordingsBody
Extension:
type: string
enum:
@@ -3315,19 +3278,6 @@ components:
- jpg
- jpeg
title: Extension
GenericResponse:
properties:
success:
type: boolean
title: Success
message:
type: string
title: Message
type: object
required:
- success
- message
title: GenericResponse
HTTPValidationError:
properties:
detail:
@@ -3337,132 +3287,6 @@ components:
title: Detail
type: object
title: HTTPValidationError
Last24HoursReview:
properties:
reviewed_alert:
type: integer
title: Reviewed Alert
reviewed_detection:
type: integer
title: Reviewed Detection
total_alert:
type: integer
title: Total Alert
total_detection:
type: integer
title: Total Detection
type: object
required:
- reviewed_alert
- reviewed_detection
- total_alert
- total_detection
title: Last24HoursReview
PlaybackFactorEnum:
type: string
enum:
- realtime
- timelapse_25x
title: PlaybackFactorEnum
PlaybackSourceEnum:
type: string
enum:
- recordings
- preview
title: PlaybackSourceEnum
RegenerateDescriptionEnum:
type: string
enum:
- thumbnails
- snapshot
title: RegenerateDescriptionEnum
ReviewActivityMotionResponse:
properties:
start_time:
type: integer
title: Start Time
motion:
type: number
title: Motion
camera:
type: string
title: Camera
type: object
required:
- start_time
- motion
- camera
title: ReviewActivityMotionResponse
ReviewModifyMultipleBody:
properties:
ids:
items:
type: string
minLength: 1
type: array
minItems: 1
title: Ids
type: object
required:
- ids
title: ReviewModifyMultipleBody
ReviewSegmentResponse:
properties:
id:
type: string
title: Id
camera:
type: string
title: Camera
start_time:
type: string
format: date-time
title: Start Time
end_time:
type: string
format: date-time
title: End Time
has_been_reviewed:
type: boolean
title: Has Been Reviewed
severity:
$ref: '#/components/schemas/SeverityEnum'
thumb_path:
type: string
title: Thumb Path
data:
title: Data
type: object
required:
- id
- camera
- start_time
- end_time
- has_been_reviewed
- severity
- thumb_path
- data
title: ReviewSegmentResponse
ReviewSummaryResponse:
properties:
last24Hours:
$ref: '#/components/schemas/Last24HoursReview'
root:
additionalProperties:
$ref: '#/components/schemas/DayReview'
type: object
title: Root
type: object
required:
- last24Hours
- root
title: ReviewSummaryResponse
SeverityEnum:
type: string
enum:
- alert
- detection
title: SeverityEnum
SubmitPlusBody:
properties:
include_annotation:
View File
@@ -1,4 +1,4 @@
from typing import List, Optional, Union
from typing import Optional, Union
from pydantic import BaseModel, Field
@@ -17,18 +17,14 @@ class EventsDescriptionBody(BaseModel):
class EventsCreateBody(BaseModel):
source_type: Optional[str] = "api"
sub_label: Optional[str] = None
score: Optional[float] = 0
score: Optional[int] = 0
duration: Optional[int] = 30
include_recording: Optional[bool] = True
draw: Optional[dict] = {}
class EventsEndBody(BaseModel):
end_time: Optional[float] = None
class EventsDeleteBody(BaseModel):
event_ids: List[str] = Field(title="The event IDs to delete")
end_time: Optional[int] = None
class SubmitPlusBody(BaseModel):
View File
@@ -28,7 +28,6 @@ class EventsQueryParams(BaseModel):
is_submitted: Optional[int] = None
min_length: Optional[float] = None
max_length: Optional[float] = None
event_id: Optional[str] = None
sort: Optional[str] = None
timezone: Optional[str] = "utc"
@@ -47,7 +46,6 @@ class EventsSearchQueryParams(BaseModel):
time_range: Optional[str] = DEFAULT_TIME_RANGE
has_clip: Optional[bool] = None
has_snapshot: Optional[bool] = None
is_submitted: Optional[bool] = None
timezone: Optional[str] = "utc"
min_score: Optional[float] = None
max_score: Optional[float] = None
View File
@@ -1,6 +0,0 @@
from pydantic import BaseModel
class GenericResponse(BaseModel):
success: bool
message: str
View File
@@ -1,20 +0,0 @@
from typing import Union
from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema
from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
)
class ExportRecordingsBody(BaseModel):
playback: PlaybackFactorEnum = Field(
default=PlaybackFactorEnum.realtime, title="Playback factor"
)
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: str = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None
View File
@@ -1,6 +0,0 @@
from pydantic import BaseModel, conlist, constr
class ReviewModifyMultipleBody(BaseModel):
# List of string with at least one element and each element with at least one char
ids: conlist(constr(min_length=1), min_length=1)
View File
@@ -1,31 +1,28 @@
from typing import Union
from typing import Optional
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
from frigate.review.maintainer import SeverityEnum
class ReviewQueryParams(BaseModel):
cameras: str = "all"
labels: str = "all"
zones: str = "all"
reviewed: int = 0
limit: Union[int, SkipJsonSchema[None]] = None
severity: Union[SeverityEnum, SkipJsonSchema[None]] = None
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
cameras: Optional[str] = "all"
labels: Optional[str] = "all"
zones: Optional[str] = "all"
reviewed: Optional[int] = 0
limit: Optional[int] = None
severity: Optional[str] = None
before: Optional[float] = None
after: Optional[float] = None
class ReviewSummaryQueryParams(BaseModel):
cameras: str = "all"
labels: str = "all"
zones: str = "all"
timezone: str = "utc"
cameras: Optional[str] = "all"
labels: Optional[str] = "all"
zones: Optional[str] = "all"
timezone: Optional[str] = "utc"
class ReviewActivityMotionQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30
cameras: Optional[str] = "all"
before: Optional[float] = None
after: Optional[float] = None
scale: Optional[int] = 30
View File
@@ -1,43 +0,0 @@
from datetime import datetime
from typing import Dict
from pydantic import BaseModel, Json
from frigate.review.maintainer import SeverityEnum
class ReviewSegmentResponse(BaseModel):
id: str
camera: str
start_time: datetime
end_time: datetime
has_been_reviewed: bool
severity: SeverityEnum
thumb_path: str
data: Json
class Last24HoursReview(BaseModel):
reviewed_alert: int
reviewed_detection: int
total_alert: int
total_detection: int
class DayReview(BaseModel):
day: datetime
reviewed_alert: int
reviewed_detection: int
total_alert: int
total_detection: int
class ReviewSummaryResponse(BaseModel):
last24Hours: Last24HoursReview
root: Dict[str, DayReview]
class ReviewActivityMotionResponse(BaseModel):
start_time: int
motion: float
camera: str
View File
@@ -16,7 +16,6 @@ from playhouse.shortcuts import model_to_dict
from frigate.api.defs.events_body import (
EventsCreateBody,
EventsDeleteBody,
EventsDescriptionBody,
EventsEndBody,
EventsSubLabelBody,
@@ -36,9 +35,8 @@ from frigate.const import (
CLIPS_DIR,
)
from frigate.embeddings import EmbeddingsContext
from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, ReviewSegment, Timeline
from frigate.object_processing import TrackedObject, TrackedObjectProcessor
from frigate.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__)
@@ -90,7 +88,6 @@ def events(params: EventsQueryParams = Depends()):
is_submitted = params.is_submitted
min_length = params.min_length
max_length = params.max_length
event_id = params.event_id
sort = params.sort
@@ -233,9 +230,6 @@ def events(params: EventsQueryParams = Depends()):
elif is_submitted > 0:
clauses.append((Event.plus_id != ""))
if event_id is not None:
clauses.append((Event.id == event_id))
if len(clauses) == 0:
clauses.append((True))
@@ -362,7 +356,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
time_range = params.time_range
has_clip = params.has_clip
has_snapshot = params.has_snapshot
is_submitted = params.is_submitted
# for similarity search
event_id = params.event_id
@@ -401,7 +394,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
Event.end_time,
Event.has_clip,
Event.has_snapshot,
Event.top_score,
Event.data,
Event.plus_id,
ReviewSegment.thumb_path,
@@ -444,12 +436,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
if has_snapshot is not None:
event_filters.append((Event.has_snapshot == has_snapshot))
if is_submitted is not None:
if is_submitted == 0:
event_filters.append((Event.plus_id.is_null()))
elif is_submitted > 0:
event_filters.append((Event.plus_id != ""))
if min_score is not None and max_score is not None:
event_filters.append((Event.data["score"].between(min_score, max_score)))
else:
@@ -1005,11 +991,9 @@ def regenerate_description(
status_code=404,
)
camera_config = request.app.frigate_config.cameras[event.camera]
if (
request.app.frigate_config.semantic_search.enabled
and camera_config.genai.enabled
and request.app.frigate_config.genai.enabled
):
request.app.event_metadata_updater.publish((event.id, params.source))
@@ -1030,71 +1014,44 @@ def regenerate_description(
content=(
{
"success": False,
"message": "Semantic Search and Generative AI must be enabled to regenerate a description",
"message": "Semantic search and generative AI are not enabled",
}
),
status_code=400,
)
def delete_single_event(event_id: str, request: Request) -> dict:
@router.delete("/events/{event_id}")
def delete_event(request: Request, event_id: str):
try:
event = Event.get(Event.id == event_id)
except DoesNotExist:
return {"success": False, "message": f"Event {event_id} not found"}
return JSONResponse(
content=({"success": False, "message": "Event " + event_id + " not found"}),
status_code=404,
)
media_name = f"{event.camera}-{event.id}"
if event.has_snapshot:
snapshot_paths = [
Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg"),
Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"),
]
for media in snapshot_paths:
media.unlink(missing_ok=True)
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media.unlink(missing_ok=True)
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
media.unlink(missing_ok=True)
if event.has_clip:
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media.unlink(missing_ok=True)
event.delete_instance()
Timeline.delete().where(Timeline.source_id == event_id).execute()
# If semantic search is enabled, update the index
if request.app.frigate_config.semantic_search.enabled:
context: EmbeddingsContext = request.app.embeddings
context.db.delete_embeddings_thumbnail(event_ids=[event_id])
context.db.delete_embeddings_description(event_ids=[event_id])
return {"success": True, "message": f"Event {event_id} deleted"}
@router.delete("/events/{event_id}")
def delete_event(request: Request, event_id: str):
result = delete_single_event(event_id, request)
status_code = 200 if result["success"] else 404
return JSONResponse(content=result, status_code=status_code)
@router.delete("/events/")
def delete_events(request: Request, body: EventsDeleteBody):
if not body.event_ids:
return JSONResponse(
content=({"success": False, "message": "No event IDs provided."}),
status_code=404,
)
deleted_events = []
not_found_events = []
for event_id in body.event_ids:
result = delete_single_event(event_id, request)
if result["success"]:
deleted_events.append(event_id)
else:
not_found_events.append(event_id)
response = {
"success": True,
"deleted_events": deleted_events,
"not_found_events": not_found_events,
}
return JSONResponse(content=response, status_code=200)
return JSONResponse(
content=({"success": True, "message": "Event " + event_id + " deleted"}),
status_code=200,
)
@router.post("/events/{camera_name}/{label}/create")
@@ -1119,11 +1076,9 @@ def create_event(
)
try:
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
external_processor: ExternalEventProcessor = request.app.external_processor
frame = request.app.detected_frames_processor.get_current_frame(camera_name)
frame = frame_processor.get_current_frame(camera_name)
event_id = external_processor.create_manual_event(
event_id = request.app.external_processor.create_manual_event(
camera_name,
label,
body.source_type,
View File
@@ -4,22 +4,17 @@ import logging
import random
import string
from pathlib import Path
from typing import Optional
import psutil
from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse
from peewee import DoesNotExist
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.tags import Tags
from frigate.const import EXPORT_DIR
from frigate.models import Export, Previews, Recordings
from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
RecordingExporter,
)
from frigate.util.builtin import is_current_hour
from frigate.models import Export, Recordings
from frigate.record.export import PlaybackFactorEnum, RecordingExporter
logger = logging.getLogger(__name__)
@@ -38,7 +33,7 @@ def export_recording(
camera_name: str,
start_time: float,
end_time: float,
body: ExportRecordingsBody,
body: dict = None,
):
if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
return JSONResponse(
@@ -48,52 +43,36 @@ def export_recording(
status_code=404,
)
playback_factor = body.playback
playback_source = body.source
friendly_name = body.name
existing_image = body.image_path
json: dict[str, any] = body or {}
playback_factor = json.get("playback", "realtime")
friendly_name: Optional[str] = json.get("name")
if playback_source == "recordings":
recordings_count = (
Recordings.select()
.where(
Recordings.start_time.between(start_time, end_time)
| Recordings.end_time.between(start_time, end_time)
| (
(start_time > Recordings.start_time)
& (end_time < Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.count()
if len(friendly_name or "") > 256:
return JSONResponse(
content=({"success": False, "message": "File name is too long."}),
status_code=401,
)
if recordings_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
else:
previews_count = (
Previews.select()
.where(
Previews.start_time.between(start_time, end_time)
| Previews.end_time.between(start_time, end_time)
| ((start_time > Previews.start_time) & (end_time < Previews.end_time))
)
.where(Previews.camera == camera_name)
.count()
)
existing_image = json.get("image_path")
if not is_current_hour(start_time) and previews_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No previews found for time range"}
),
status_code=400,
)
recordings_count = (
Recordings.select()
.where(
Recordings.start_time.between(start_time, end_time)
| Recordings.end_time.between(start_time, end_time)
| ((start_time > Recordings.start_time) & (end_time < Recordings.end_time))
)
.where(Recordings.camera == camera_name)
.count()
)
if recordings_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
exporter = RecordingExporter(
@@ -109,11 +88,6 @@ def export_recording(
if playback_factor in PlaybackFactorEnum.__members__.values()
else PlaybackFactorEnum.realtime
),
(
PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings
),
)
exporter.start()
return JSONResponse(
View File
@@ -82,10 +82,6 @@ def create_fastapi_app(
database.close()
return response
@app.on_event("startup")
async def startup():
logger.info("FastAPI started")
# Rate limiter (used for login endpoint)
auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit or "")
app.state.limiter = limiter
View File
@@ -36,7 +36,6 @@ from frigate.const import (
RECORD_DIR,
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording
@@ -80,11 +79,7 @@ def mjpeg_feed(
def imagestream(
detected_frames_processor: TrackedObjectProcessor,
camera_name: str,
fps: int,
height: int,
draw_options: dict[str, any],
detected_frames_processor, camera_name: str, fps: int, height: int, draw_options
):
while True:
# max out at specified FPS
@@ -123,7 +118,6 @@ def latest_frame(
extension: Extension,
params: MediaLatestFrameQueryParams = Depends(),
):
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
draw_options = {
"bounding_boxes": params.bbox,
"timestamp": params.timestamp,
@@ -135,14 +129,17 @@ def latest_frame(
quality = params.quality
if camera_name in request.app.frigate_config.cameras:
frame = frame_processor.get_current_frame(camera_name, draw_options)
frame = request.app.detected_frames_processor.get_current_frame(
camera_name, draw_options
)
retry_interval = float(
request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10
)
if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval
request.app.detected_frames_processor.get_current_frame_time(camera_name)
+ retry_interval
):
if request.app.camera_error_image is None:
error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
@@ -183,7 +180,7 @@ def latest_frame(
)
elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
frame = cv2.cvtColor(
frame_processor.get_current_frame(camera_name),
request.app.detected_frames_processor.get_current_frame(camera_name),
cv2.COLOR_YUV2BGR_I420,
)
@@ -463,8 +460,8 @@ def recording_clip(
text=False,
) as ffmpeg:
while True:
data = ffmpeg.stdout.read(8192)
if data is not None and len(data) > 0:
data = ffmpeg.stdout.read(1024)
if data is not None:
yield data
else:
if ffmpeg.returncode and ffmpeg.returncode != 0:
@@ -816,15 +813,15 @@ def grid_snapshot(
):
if camera_name in request.app.frigate_config.cameras:
detect = request.app.frigate_config.cameras[camera_name].detect
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
frame = frame_processor.get_current_frame(camera_name, {})
frame = request.app.detected_frames_processor.get_current_frame(camera_name, {})
retry_interval = float(
request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10
)
if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval
request.app.detected_frames_processor.get_current_frame_time(camera_name)
+ retry_interval
):
return JSONResponse(
content={"success": False, "message": "Unable to get valid frame"},
@@ -920,7 +917,7 @@ def grid_snapshot(
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response(
jpg.tobytes(),
jpg.tobytes,
media_type="image/jpeg",
headers={"Cache-Control": "no-store"},
)
@@ -1456,6 +1453,7 @@ def preview_thumbnail(file_name: str):
return Response(
jpg_bytes,
# FIXME: Shouldn't it be either jpg or webp depending on the endpoint?
media_type="image/webp",
headers={
"Content-Type": "image/webp",
@@ -1484,7 +1482,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response(
jpg.tobytes(),
jpg.tobytes,
media_type="image/jpeg",
headers={"Cache-Control": "no-store"},
)
@@ -1537,6 +1535,6 @@ def label_snapshot(request: Request, camera_name: str, label: str):
_, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response(
jpg.tobytes(),
jpg.tobytes,
media_type="image/jpeg",
)
View File
@@ -12,18 +12,11 @@ from fastapi.responses import JSONResponse
from peewee import Case, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict
from frigate.api.defs.generic_response import GenericResponse
from frigate.api.defs.review_body import ReviewModifyMultipleBody
from frigate.api.defs.review_query_parameters import (
ReviewActivityMotionQueryParams,
ReviewQueryParams,
ReviewSummaryQueryParams,
)
from frigate.api.defs.review_responses import (
ReviewActivityMotionResponse,
ReviewSegmentResponse,
ReviewSummaryResponse,
)
from frigate.api.defs.tags import Tags
from frigate.models import Recordings, ReviewSegment
from frigate.util.builtin import get_tz_modifiers
@@ -33,7 +26,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.review])
@router.get("/review", response_model=list[ReviewSegmentResponse])
@router.get("/review")
def review(params: ReviewQueryParams = Depends()):
cameras = params.cameras
labels = params.labels
@@ -109,7 +102,7 @@ def review(params: ReviewQueryParams = Depends()):
return JSONResponse(content=[r for r in review])
@router.get("/review/summary", response_model=ReviewSummaryResponse)
@router.get("/review/summary")
def review_summary(params: ReviewSummaryQueryParams = Depends()):
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
@@ -180,6 +173,18 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
ReviewSegment.has_been_reviewed,
)
],
0,
)
).alias("reviewed_motion"),
fn.SUM(
Case(
None,
@@ -204,6 +209,18 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
0,
)
).alias("total_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
1,
)
],
0,
)
).alias("total_motion"),
)
.where(reduce(operator.and_, clauses))
.dicts()
@@ -265,6 +282,18 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
ReviewSegment.has_been_reviewed,
)
],
0,
)
).alias("reviewed_motion"),
fn.SUM(
Case(
None,
@@ -289,6 +318,18 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
0,
)
).alias("total_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
1,
)
],
0,
)
).alias("total_motion"),
)
.where(reduce(operator.and_, clauses))
.group_by(
@@ -307,10 +348,19 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
return JSONResponse(content=data)
@router.post("/reviews/viewed", response_model=GenericResponse)
def set_multiple_reviewed(body: ReviewModifyMultipleBody):
@router.post("/reviews/viewed")
def set_multiple_reviewed(body: dict = None):
json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
context=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
ReviewSegment.update(has_been_reviewed=True).where(
ReviewSegment.id << body.ids
ReviewSegment.id << list_of_ids
).execute()
return JSONResponse(
@@ -319,9 +369,17 @@ def set_multiple_reviewed(body: ReviewModifyMultipleBody):
)
@router.post("/reviews/delete", response_model=GenericResponse)
def delete_reviews(body: ReviewModifyMultipleBody):
list_of_ids = body.ids
@router.post("/reviews/delete")
def delete_reviews(body: dict = None):
json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
content=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
reviews = (
ReviewSegment.select(
ReviewSegment.camera,
@@ -366,9 +424,7 @@ def delete_reviews(body: ReviewModifyMultipleBody):
)
@router.get(
"/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
)
@router.get("/review/activity/motion")
def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
"""Get motion and audio activity."""
cameras = params.cameras
@@ -442,44 +498,98 @@ def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
return JSONResponse(content=normalized)
@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse)
@router.get("/review/activity/audio")
def audio_activity(params: ReviewActivityMotionQueryParams = Depends()):
"""Get motion and audio activity."""
cameras = params.cameras
before = params.before or datetime.datetime.now().timestamp()
after = (
params.after
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
)
# get scale in seconds
scale = params.scale
clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
all_recordings: list[Recordings] = (
Recordings.select(
Recordings.start_time,
Recordings.duration,
Recordings.objects,
Recordings.dBFS,
)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.iterator()
)
# format is: { timestamp: segment_start_ts, motion: [0-100], audio: [0 - -100] }
# periods where active objects / audio was detected will cause audio to be scaled down
data: list[dict[str, float]] = []
for rec in all_recordings:
data.append(
{
"start_time": rec.start_time,
"audio": rec.dBFS if rec.objects == 0 else 0,
}
)
# resample data using pandas to get activity on scaled basis
df = pd.DataFrame(data, columns=["start_time", "audio"])
df = df.astype(dtype={"audio": "float16"})
# set date as datetime index
df["start_time"] = pd.to_datetime(df["start_time"], unit="s")
df.set_index(["start_time"], inplace=True)
# normalize data
df = df.resample(f"{scale}S").mean().fillna(0.0)
df["audio"] = (
(df["audio"] - df["audio"].max())
/ (df["audio"].min() - df["audio"].max())
* -100
)
# change types for output
df.index = df.index.astype(int) // (10**9)
normalized = df.reset_index().to_dict("records")
return JSONResponse(content=normalized)
@router.get("/review/event/{event_id}")
def get_review_from_event(event_id: str):
try:
return JSONResponse(
model_to_dict(
ReviewSegment.get(
ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
)
return model_to_dict(
ReviewSegment.get(
ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
)
)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Review item not found"},
status_code=404,
)
return "Review item not found", 404
@router.get("/review/{review_id}", response_model=ReviewSegmentResponse)
def get_review(review_id: str):
@router.get("/review/{event_id}")
def get_review(event_id: str):
try:
return JSONResponse(
content=model_to_dict(ReviewSegment.get(ReviewSegment.id == review_id))
)
return model_to_dict(ReviewSegment.get(ReviewSegment.id == event_id))
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Review item not found"},
status_code=404,
)
return "Review item not found", 404
@router.delete("/review/{review_id}/viewed", response_model=GenericResponse)
def set_not_reviewed(review_id: str):
@router.delete("/review/{event_id}/viewed")
def set_not_reviewed(event_id: str):
try:
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == review_id)
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == event_id)
except DoesNotExist:
return JSONResponse(
content=(
{"success": False, "message": "Review " + review_id + " not found"}
{"success": False, "message": "Review " + event_id + " not found"}
),
status_code=404,
)
@@ -488,8 +598,6 @@ def set_not_reviewed(review_id: str):
review.save()
return JSONResponse(
content=(
{"success": True, "message": "Set Review " + review_id + " as not viewed"}
),
content=({"success": True, "message": "Reviewed " + event_id + " not viewed"}),
status_code=200,
)
View File
@@ -36,7 +36,6 @@ from frigate.const import (
EXPORT_DIR,
MODEL_CACHE_DIR,
RECORD_DIR,
SHM_FRAMES_VAR,
)
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings
@@ -69,7 +68,6 @@ from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor
from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.object import get_camera_regions_grid
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
@@ -92,7 +90,6 @@ class FrigateApp:
self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None
self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
self.frame_manager = SharedMemoryFrameManager()
self.config = config
def ensure_dirs(self) -> None:
@@ -328,20 +325,20 @@ class FrigateApp:
for det in self.config.detectors.values()
]
)
shm_in = UntrackedSharedMemory(
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=largest_frame,
)
except FileExistsError:
shm_in = UntrackedSharedMemory(name=name)
shm_in = mp.shared_memory.SharedMemory(name=name)
try:
shm_out = UntrackedSharedMemory(
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
except FileExistsError:
shm_out = UntrackedSharedMemory(name=f"out-{name}")
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
@@ -434,11 +431,6 @@ class FrigateApp:
logger.info(f"Capture process not started for disabled camera {name}")
continue
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_{i}", frame_size)
capture_process = util.Process(
target=capture_camera,
name=f"camera_capture:{name}",
@@ -521,21 +513,15 @@ class FrigateApp:
1,
)
if cam_total_frame_size == 0.0:
return 0
shm_frame_count = min(
int(os.environ.get(SHM_FRAMES_VAR, "50")),
int(available_shm / (cam_total_frame_size)),
)
shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))
logger.debug(
f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
)
if shm_frame_count < 20:
if shm_frame_count < 10:
logger.warning(
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 10)}MB."
)
return shm_frame_count
@@ -721,7 +707,6 @@ class FrigateApp:
self.event_metadata_updater.stop()
self.inter_zmq_proxy.stop()
self.frame_manager.cleanup()
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
View File
@@ -22,7 +22,7 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, ReviewSegment
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import ModelStatusTypesEnum, TrackedObjectUpdateTypesEnum
from frigate.types import ModelStatusTypesEnum
from frigate.util.object import get_camera_regions_grid
from frigate.util.services import restart_frigate
@@ -137,14 +137,8 @@ class Dispatcher:
event.data["description"] = payload["description"]
event.save()
self.publish(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": event.data["description"],
}
),
"event_update",
json.dumps({"id": event.id, "description": event.data["description"]}),
)
def handle_update_model_state():
View File
@@ -14,7 +14,7 @@ class EventUpdatePublisher(Publisher):
super().__init__("update")
def publish(
self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, any]]
self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, any]]
) -> None:
super().publish(payload)
View File
@@ -17,7 +17,7 @@ class MqttClient(Communicator): # type: ignore[misc]
def __init__(self, config: FrigateConfig) -> None:
self.config = config
self.mqtt_config = config.mqtt
self.connected = False
self.connected: bool = False
def subscribe(self, receiver: Callable) -> None:
"""Wrapper for allowing dispatcher to subscribe."""
@@ -27,7 +27,7 @@ class MqttClient(Communicator): # type: ignore[misc]
def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
"""Wrapper for publishing when client is in valid state."""
if not self.connected:
logger.debug(f"Unable to publish to {topic}: client is not connected")
logger.error(f"Unable to publish to {topic}: client is not connected")
return
self.client.publish(
@@ -133,7 +133,7 @@ class MqttClient(Communicator): # type: ignore[misc]
"""Mqtt connection callback."""
threading.current_thread().name = "mqtt"
if reason_code != 0:
if reason_code == "Server unavailable":
if reason_code == "Server Unavailable":
logger.error(
"Unable to connect to MQTT server: MQTT Server unavailable"
)
@@ -173,7 +173,6 @@ class MqttClient(Communicator): # type: ignore[misc]
client_id=self.mqtt_config.client_id,
)
self.client.on_connect = self._on_connect
self.client.on_disconnect = self._on_disconnect
self.client.will_set(
self.mqtt_config.topic_prefix + "/available",
payload="offline",
@@ -198,6 +197,14 @@ class MqttClient(Communicator): # type: ignore[misc]
for name in self.config.cameras.keys():
for callback in callback_types:
# We need to pre-clear existing set topics because in previous
# versions the webUI retained on the /set topic but this is
# no longer the case.
self.client.publish(
f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
None,
retain=True,
)
self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
self.on_mqtt_command,
View File
@@ -13,7 +13,7 @@ class AuthConfig(FrigateBaseModel):
default=False, title="Reset the admin password on startup"
)
cookie_name: str = Field(
default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z]_*$"
)
cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
session_length: int = Field(
View File
@@ -94,10 +94,3 @@ class RecordConfig(FrigateBaseModel):
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of recording."
)
@property
def event_pre_capture(self) -> int:
return max(
self.alerts.pre_capture,
self.detections.pre_capture,
)
View File
@@ -67,7 +67,7 @@ logger = logging.getLogger(__name__)
yaml = YAML()
DEFAULT_CONFIG_FILE = "/config/config.yml"
DEFAULT_CONFIG_FILES = ["/config/config.yaml", "/config/config.yml"]
DEFAULT_CONFIG = """
mqtt:
enabled: False
@@ -230,16 +230,12 @@ def verify_recording_segments_setup_with_reasonable_time(
try:
seg_arg_index = record_args.index("-segment_time")
except ValueError:
raise ValueError(
f"Camera {camera_config.name} has no segment_time in \
recording output args, segment args are required for record."
)
raise ValueError(f"Camera {camera_config.name} has no segment_time in \
recording output args, segment args are required for record.")
if int(record_args[seg_arg_index + 1]) > 60:
raise ValueError(
f"Camera {camera_config.name} has invalid segment_time output arg, \
segment_time must be 60 or less."
)
raise ValueError(f"Camera {camera_config.name} has invalid segment_time output arg, \
segment_time must be 60 or less.")
def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
@@ -638,23 +634,27 @@ class FrigateConfig(FrigateBaseModel):
@classmethod
def load(cls, **kwargs):
config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
config_path = os.environ.get("CONFIG_FILE")
if not os.path.isfile(config_path):
config_path = config_path.replace("yml", "yaml")
# No explicit configuration file, try to find one in the default paths.
if config_path is None:
for path in DEFAULT_CONFIG_FILES:
if os.path.isfile(path):
config_path = path
break
# No configuration file found, create one.
new_config = False
if not os.path.isfile(config_path):
if config_path is None:
logger.info("No config file found, saving default config")
config_path = DEFAULT_CONFIG_FILE
config_path = DEFAULT_CONFIG_FILES[-1]
new_config = True
else:
# Check if the config file needs to be migrated.
migrate_frigate_config(config_path)
# Finally, load the resulting configuration file.
with open(config_path, "a+" if new_config else "r") as f:
with open(config_path, "a+") as f:
# Only write the default config if the opened file is non-empty. This can happen as
# a race condition. It's extremely unlikely, but eh. Might as well check it.
if new_config and f.tell() == 0:
View File
@@ -23,7 +23,7 @@ EnvString = Annotated[str, AfterValidator(validate_env_string)]
def validate_env_vars(v: dict[str, str], info: ValidationInfo) -> dict[str, str]:
if isinstance(info.context, dict) and info.context.get("install", False):
for k, v in v.items():
for k, v in v:
os.environ[k] = v
return v
View File
@@ -13,8 +13,6 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
SHM_FRAMES_VAR = "SHM_MAX_FRAMES"
# Attribute & Object constants
DEFAULT_ATTRIBUTE_LABEL_MAP = {
View File
@@ -27,11 +27,6 @@ class InputTensorEnum(str, Enum):
nhwc = "nhwc"
class InputDTypeEnum(str, Enum):
float = "float"
int = "int"
class ModelTypeEnum(str, Enum):
ssd = "ssd"
yolox = "yolox"
@@ -58,9 +53,6 @@ class ModelConfig(BaseModel):
input_pixel_format: PixelFormatEnum = Field(
default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
)
input_dtype: InputDTypeEnum = Field(
default=InputDTypeEnum.int, title="Model Input D Type"
)
model_type: ModelTypeEnum = Field(
default=ModelTypeEnum.ssd, title="Object Detection Model Type"
)
View File
@@ -54,7 +54,7 @@ class ONNXDetector(DetectionApi):
logger.info(f"ONNX: {path} loaded")
def detect_raw(self, tensor_input: np.ndarray):
def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name
tensor_output = self.model.run(None, {model_input_name: tensor_input})
View File
@@ -98,7 +98,9 @@ class ROCmDetector(DetectionApi):
else:
logger.info(f"AMD/ROCm: loading model from {path}")
if (
if path.endswith(".onnx"):
self.model = migraphx.parse_onnx(path)
elif (
path.endswith(".tf")
or path.endswith(".tf2")
or path.endswith(".tflite")
@@ -106,7 +108,7 @@ class ROCmDetector(DetectionApi):
# untested
self.model = migraphx.parse_tf(path)
else:
self.model = migraphx.parse_onnx(path)
raise Exception(f"AMD/ROCm: unknown model format {path}")
logger.info("AMD/ROCm: compiling the model")
View File
@@ -1,11 +1,13 @@
"""SQLite-vec embeddings database."""
import base64
import io
import logging
import os
import time
from numpy import ndarray
from PIL import Image
from playhouse.shortcuts import model_to_dict
from frigate.comms.inter_process import InterProcessRequestor
@@ -20,7 +22,7 @@ from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
from .functions.onnx import GenericONNXEmbedding
logger = logging.getLogger(__name__)
@@ -95,7 +97,7 @@ class Embeddings:
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
model_size=config.model_size,
model_type=ModelTypeEnum.text,
model_type="text",
requestor=self.requestor,
device="CPU",
)
@@ -116,102 +118,83 @@ class Embeddings:
model_file=model_file,
download_urls=download_urls,
model_size=config.model_size,
model_type=ModelTypeEnum.vision,
model_type="vision",
requestor=self.requestor,
device="GPU" if config.model_size == "large" else "CPU",
)
def embed_thumbnail(
self, event_id: str, thumbnail: bytes, upsert: bool = True
) -> ndarray:
"""Embed thumbnail and optionally insert into DB.
@param: event_id in Events DB
@param: thumbnail bytes in jpg format
@param: upsert If embedding should be upserted into vec DB
"""
def upsert_thumbnail(self, event_id: str, thumbnail: bytes) -> ndarray:
# Convert thumbnail bytes to PIL Image
embedding = self.vision_embedding([thumbnail])[0]
image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
embedding = self.vision_embedding([image])[0]
if upsert:
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
VALUES(?, ?)
""",
(event_id, serialize(embedding)),
)
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
VALUES(?, ?)
""",
(event_id, serialize(embedding)),
)
return embedding
def batch_embed_thumbnail(
self, event_thumbs: dict[str, bytes], upsert: bool = True
) -> list[ndarray]:
"""Embed thumbnails and optionally insert into DB.
@param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
@param: upsert If embedding should be upserted into vec DB
"""
def batch_upsert_thumbnail(self, event_thumbs: dict[str, bytes]) -> list[ndarray]:
images = [
Image.open(io.BytesIO(thumb)).convert("RGB")
for thumb in event_thumbs.values()
]
ids = list(event_thumbs.keys())
embeddings = self.vision_embedding(list(event_thumbs.values()))
embeddings = self.vision_embedding(images)
if upsert:
items = []
items = []
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
VALUES {}
""".format(", ".join(["(?, ?)"] * len(ids))),
items,
)
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
VALUES {}
""".format(", ".join(["(?, ?)"] * len(ids))),
items,
)
return embeddings
def embed_description(
self, event_id: str, description: str, upsert: bool = True
) -> ndarray:
def upsert_description(self, event_id: str, description: str) -> ndarray:
embedding = self.text_embedding([description])[0]
if upsert:
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
VALUES(?, ?)
""",
(event_id, serialize(embedding)),
)
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
VALUES(?, ?)
""",
(event_id, serialize(embedding)),
)
return embedding
def batch_embed_description(
self, event_descriptions: dict[str, str], upsert: bool = True
) -> ndarray:
def batch_upsert_description(self, event_descriptions: dict[str, str]) -> ndarray:
# upsert embeddings one by one to avoid token limit
embeddings = []
for desc in event_descriptions.values():
embeddings.append(self.text_embedding([desc])[0])
if upsert:
ids = list(event_descriptions.keys())
items = []
ids = list(event_descriptions.keys())
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
items = []
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
VALUES {}
""".format(", ".join(["(?, ?)"] * len(ids))),
items,
)
for i in range(len(ids)):
items.append(ids[i])
items.append(serialize(embeddings[i]))
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
VALUES {}
""".format(", ".join(["(?, ?)"] * len(ids))),
items,
)
return embeddings
@@ -278,10 +261,10 @@ class Embeddings:
totals["processed_objects"] += 1
# run batch embedding
self.batch_embed_thumbnail(batch_thumbs)
self.batch_upsert_thumbnail(batch_thumbs)
if batch_descs:
self.batch_embed_description(batch_descs)
self.batch_upsert_description(batch_descs)
# report progress every batch so we don't spam the logs
progress = (totals["processed_objects"] / total_events) * 100
View File
@@ -1,7 +1,6 @@
import logging
import os
import warnings
from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Union
@@ -32,12 +31,6 @@ disable_progress_bar()
logger = logging.getLogger(__name__)
class ModelTypeEnum(str, Enum):
face = "face"
vision = "vision"
text = "text"
class GenericONNXEmbedding:
"""Generic embedding function for ONNX models (text and vision)."""
@@ -95,10 +88,7 @@ class GenericONNXEmbedding:
file_name = os.path.basename(path)
if file_name in self.download_urls:
ModelDownloader.download_from_url(self.download_urls[file_name], path)
elif (
file_name == self.tokenizer_file
and self.model_type == ModelTypeEnum.text
):
elif file_name == self.tokenizer_file and self.model_type == "text":
if not os.path.exists(path + "/" + self.model_name):
logger.info(f"Downloading {self.model_name} tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
@@ -129,7 +119,7 @@ class GenericONNXEmbedding:
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
if self.model_type == ModelTypeEnum.text:
if self.model_type == "text":
self.tokenizer = self._load_tokenizer()
else:
self.feature_extractor = self._load_feature_extractor()
@@ -153,35 +143,11 @@ class GenericONNXEmbedding:
f"{MODEL_CACHE_DIR}/{self.model_name}",
)
def _preprocess_inputs(self, raw_inputs: any) -> any:
if self.model_type == ModelTypeEnum.text:
max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
return [
self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=max_length,
return_tensors="np",
)
for text in raw_inputs
]
elif self.model_type == ModelTypeEnum.vision:
processed_images = [self._process_image(img) for img in raw_inputs]
return [
self.feature_extractor(images=image, return_tensors="np")
for image in processed_images
]
else:
raise ValueError(f"Unable to preprocess inputs for {self.model_type}")
def _process_image(self, image):
if isinstance(image, str):
if image.startswith("http"):
response = requests.get(image)
image = Image.open(BytesIO(response.content)).convert("RGB")
elif isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert("RGB")
return image
@@ -197,7 +163,25 @@ class GenericONNXEmbedding:
)
return []
processed_inputs = self._preprocess_inputs(inputs)
if self.model_type == "text":
max_length = max(len(self.tokenizer.encode(text)) for text in inputs)
processed_inputs = [
self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=max_length,
return_tensors="np",
)
for text in inputs
]
else:
processed_images = [self._process_image(img) for img in inputs]
processed_inputs = [
self.feature_extractor(images=image, return_tensors="np")
for image in processed_images
]
input_names = self.runner.get_input_names()
onnx_inputs = {name: [] for name in input_names}
input: dict[str, any]
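A minimal sketch of the text path above: every prompt is padded to the longest tokenized length in the batch so the ONNX runner receives uniformly shaped numpy inputs. The model name here is illustrative only.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
texts = ["a person walking a dog", "red car parked in the driveway"]

# pad to the longest tokenized prompt in this batch, as the code above does
max_length = max(len(tokenizer.encode(t)) for t in texts)
processed = [
    tokenizer(t, padding="max_length", truncation=True,
              max_length=max_length, return_tensors="np")
    for t in texts
]
print(processed[0]["input_ids"].shape)  # (1, max_length)
```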


@@ -24,7 +24,6 @@ from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
from frigate.events.types import EventTypeEnum
from frigate.genai import get_genai_client
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, calculate_region
@@ -63,7 +62,7 @@ class EmbeddingMaintainer(threading.Thread):
self.requestor = InterProcessRequestor()
self.stop_event = stop_event
self.tracked_events = {}
self.genai_client = get_genai_client(config)
self.genai_client = get_genai_client(config.genai)
def run(self) -> None:
"""Maintain a SQLite-vec database for semantic search."""
@@ -87,7 +86,7 @@ class EmbeddingMaintainer(threading.Thread):
try:
if topic == EmbeddingsRequestEnum.embed_description.value:
return serialize(
self.embeddings.embed_description(
self.embeddings.upsert_description(
data["id"], data["description"]
),
pack=False,
@@ -95,7 +94,7 @@ class EmbeddingMaintainer(threading.Thread):
elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
thumbnail = base64.b64decode(data["thumbnail"])
return serialize(
self.embeddings.embed_thumbnail(data["id"], thumbnail),
self.embeddings.upsert_thumbnail(data["id"], thumbnail),
pack=False,
)
elif topic == EmbeddingsRequestEnum.generate_search.value:
@@ -114,7 +113,7 @@ class EmbeddingMaintainer(threading.Thread):
if update is None:
return
source_type, _, camera, frame_name, data = update
source_type, _, camera, data = update
if not camera or source_type != EventTypeEnum.tracked_object:
return
@@ -134,9 +133,8 @@ class EmbeddingMaintainer(threading.Thread):
# Create our own thumbnail based on the bounding box and the frame time
try:
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
)
frame_id = f"{camera}{data['frame_time']}"
yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
if yuv_frame is not None:
data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
@@ -148,7 +146,7 @@ class EmbeddingMaintainer(threading.Thread):
self.tracked_events[data["id"]].append(data)
self.frame_manager.close(frame_name)
self.frame_manager.close(frame_id)
except FileNotFoundError:
pass
@@ -272,7 +270,7 @@ class EmbeddingMaintainer(threading.Thread):
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
"""Embed the thumbnail for an event."""
self.embeddings.embed_thumbnail(event_id, thumbnail)
self.embeddings.upsert_thumbnail(event_id, thumbnail)
def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
"""Embed the description for an event."""
@@ -289,15 +287,11 @@ class EmbeddingMaintainer(threading.Thread):
# fire and forget description update
self.requestor.send_data(
UPDATE_EVENT_DESCRIPTION,
{
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": description,
},
{"id": event.id, "description": description},
)
# Embed the description
self.embeddings.embed_description(event.id, description)
# Encode the description
self.embeddings.upsert_description(event.id, description)
logger.debug(
"Generated description for %s (%d images): %s",


@@ -64,8 +64,6 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
class AudioProcessor(util.Process):
name = "frigate.audio_manager"
def __init__(
self,
cameras: list[CameraConfig],
@@ -216,10 +214,6 @@ class AudioEventMaintainer(threading.Thread):
"label": label,
"last_detection": datetime.datetime.now().timestamp(),
}
else:
self.logger.warning(
f"Failed to create audio event with status code {resp.status_code}"
)
def expire_detections(self) -> None:
now = datetime.datetime.now().timestamp()


@@ -21,9 +21,6 @@ class EventCleanupType(str, Enum):
snapshots = "snapshots"
CHUNK_SIZE = 50
class EventCleanup(threading.Thread):
def __init__(
self, config: FrigateConfig, stop_event: MpEvent, db: SqliteVecQueueDatabase
@@ -110,7 +107,6 @@ class EventCleanup(threading.Thread):
.namedtuples()
.iterator()
)
logger.debug(f"{len(list(expired_events))} events can be expired")
# delete the media from disk
for expired in expired_events:
media_name = f"{expired.camera}-{expired.id}"
@@ -129,34 +125,13 @@ class EventCleanup(threading.Thread):
logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry
query = Event.select(Event.id).where(
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == event.label,
Event.retain_indefinitely == False,
)
events_to_update = []
for batch in query.iterator():
events_to_update.extend([event.id for event in batch])
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(
f"Updating {update_params} for {len(events_to_update)} events"
)
Event.update(update_params).where(
Event.id << events_to_update
).execute()
events_to_update = []
# Update any remaining events
if events_to_update:
logger.debug(
f"Updating clips/snapshots attribute for {len(events_to_update)} events"
)
Event.update(update_params).where(
Event.id << events_to_update
).execute()
update_query.execute()
events_to_update = []
@@ -221,11 +196,7 @@ class EventCleanup(threading.Thread):
logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry
for i in range(0, len(events_to_update), CHUNK_SIZE):
batch = events_to_update[i : i + CHUNK_SIZE]
logger.debug(f"Updating {update_params} for {len(batch)} events")
Event.update(update_params).where(Event.id << batch).execute()
Event.update(update_params).where(Event.id << events_to_update).execute()
return events_to_update
def run(self) -> None:
@@ -251,11 +222,10 @@ class EventCleanup(threading.Thread):
.iterator()
)
events_to_delete = [e.id for e in events]
logger.debug(f"Found {len(events_to_delete)} events that can be expired")
if len(events_to_delete) > 0:
for i in range(0, len(events_to_delete), CHUNK_SIZE):
chunk = events_to_delete[i : i + CHUNK_SIZE]
logger.debug(f"Deleting {len(chunk)} events from the database")
chunk_size = 50
for i in range(0, len(events_to_delete), chunk_size):
chunk = events_to_delete[i : i + chunk_size]
Event.delete().where(Event.id << chunk).execute()
if self.config.semantic_search.enabled:
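A sketch of the chunked delete above, assuming a peewee model shaped like frigate's `Event`: the `<<` operator compiles to an SQL `IN`, and slices of 50 keep each statement comfortably under SQLite's bound-variable limit.

```python
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Event(Model):
    id = CharField(primary_key=True)

    class Meta:
        database = db


db.create_tables([Event])
Event.insert_many([{"id": f"evt-{i}"} for i in range(120)]).execute()

events_to_delete = [e.id for e in Event.select(Event.id).namedtuples()]
chunk_size = 50
for i in range(0, len(events_to_delete), chunk_size):
    chunk = events_to_delete[i : i + chunk_size]
    Event.delete().where(Event.id << chunk).execute()  # id IN (...)
```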


@@ -10,7 +10,6 @@ from enum import Enum
from typing import Optional
import cv2
from numpy import ndarray
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.events_updater import EventUpdatePublisher
@@ -46,7 +45,7 @@ class ExternalEventProcessor:
duration: Optional[int],
include_recording: bool,
draw: dict[str, any],
snapshot_frame: Optional[ndarray],
snapshot_frame: any,
) -> str:
now = datetime.datetime.now().timestamp()
camera_config = self.config.cameras.get(camera)
@@ -65,14 +64,13 @@ class ExternalEventProcessor:
EventTypeEnum.api,
EventStateEnum.start,
camera,
"",
{
"id": event_id,
"label": label,
"sub_label": sub_label,
"score": score,
"camera": camera,
"start_time": now - camera_config.record.event_pre_capture,
"start_time": now,
"end_time": end,
"thumbnail": thumbnail,
"has_clip": camera_config.record.enabled and include_recording,
@@ -108,7 +106,6 @@ class ExternalEventProcessor:
EventTypeEnum.api,
EventStateEnum.end,
None,
"",
{"id": event_id, "end_time": end_time},
)
)
@@ -133,11 +130,8 @@ class ExternalEventProcessor:
label: str,
event_id: str,
draw: dict[str, any],
img_frame: Optional[ndarray],
) -> Optional[str]:
if img_frame is None:
return None
img_frame: any,
) -> str:
# write clean snapshot if enabled
if camera_config.snapshots.clean_copy:
ret, png = cv2.imencode(".png", img_frame)


@@ -75,7 +75,7 @@ class EventProcessor(threading.Thread):
if update is None:
continue
source_type, event_type, camera, _, event_data = update
source_type, event_type, camera, event_data = update
logger.debug(
f"Event received: {source_type} {event_type} {camera} {event_data['id']}"


@@ -1,17 +1,14 @@
"""Generative AI module for Frigate."""
import importlib
import logging
import os
from typing import Optional
from playhouse.shortcuts import model_to_dict
from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
from frigate.models import Event
logger = logging.getLogger(__name__)
PROVIDERS = {}
@@ -44,7 +41,6 @@ class GenAIClient:
event.label,
camera_config.genai.prompt,
).format(**model_to_dict(event))
logger.debug(f"Sending images to genai provider with prompt: {prompt}")
return self._send(prompt, thumbnails)
def _init_provider(self):
@@ -56,19 +52,13 @@ class GenAIClient:
return None
def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
def get_genai_client(genai_config: GenAIConfig) -> Optional[GenAIClient]:
"""Get the GenAI client."""
genai_config = config.genai
genai_cameras = [
c for c in config.cameras.values() if c.enabled and c.genai.enabled
]
if genai_cameras:
if genai_config.enabled:
load_providers()
provider = PROVIDERS.get(genai_config.provider)
if provider:
return provider(genai_config)
return None
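The prompt templating above boils down to `str.format(**model_to_dict(event))`; a tiny illustration with a stand-in event dict:

```python
# stand-in for model_to_dict(event)
event = {"id": "1728-abc", "camera": "front_door", "label": "person"}
prompt = "Describe the {label} seen on the {camera} camera".format(**event)
print(prompt)  # Describe the person seen on the front_door camera
```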


@@ -93,7 +93,7 @@ class ReviewSegment(Model): # type: ignore[misc]
start_time = DateTimeField()
end_time = DateTimeField()
has_been_reviewed = BooleanField(default=False)
severity = CharField(max_length=30) # alert, detection
severity = CharField(max_length=30) # alert, detection, significant_motion
thumb_path = CharField(unique=True)
data = JSONField() # additional data about detection like list of labels, zone, areas of significant motion


@@ -59,7 +59,3 @@ ignore_errors = false
[mypy-frigate.watchdog]
ignore_errors = false
disallow_untyped_calls = false
[mypy-frigate.service_manager.*]
ignore_errors = false


@@ -12,14 +12,10 @@ from setproctitle import setproctitle
import frigate.util as util
from frigate.detectors import create_detector
from frigate.detectors.detector_config import (
BaseDetectorConfig,
InputDTypeEnum,
InputTensorEnum,
)
from frigate.detectors.detector_config import BaseDetectorConfig, InputTensorEnum
from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.services import listen
logger = logging.getLogger(__name__)
@@ -59,15 +55,12 @@ class LocalObjectDetector(ObjectDetector):
self.input_transform = tensor_transform(
detector_config.model.input_tensor
)
self.dtype = detector_config.model.input_dtype
else:
self.input_transform = None
self.dtype = InputDTypeEnum.int
self.detect_api = create_detector(detector_config)
def detect(self, tensor_input: np.ndarray, threshold=0.4):
def detect(self, tensor_input, threshold=0.4):
detections = []
raw_detections = self.detect_raw(tensor_input)
@@ -84,14 +77,9 @@ class LocalObjectDetector(ObjectDetector):
self.fps.update()
return detections
def detect_raw(self, tensor_input: np.ndarray):
def detect_raw(self, tensor_input):
if self.input_transform:
tensor_input = np.transpose(tensor_input, self.input_transform)
if self.dtype == InputDTypeEnum.float:
tensor_input = tensor_input.astype(np.float32)
tensor_input /= 255
return self.detect_api.detect_raw(tensor_input=tensor_input)
@@ -122,7 +110,7 @@ def run_detector(
outputs = {}
for name in out_events.keys():
out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
outputs[name] = {"shm": out_shm, "np": out_np}
@@ -212,13 +200,15 @@ class RemoteObjectDetector:
self.detection_queue = detection_queue
self.event = event
self.stop_event = stop_event
self.shm = UntrackedSharedMemory(name=self.name, create=False)
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
self.np_shm = np.ndarray(
(1, model_config.height, model_config.width, 3),
dtype=np.uint8,
buffer=self.shm.buf,
)
self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False)
self.out_shm = mp.shared_memory.SharedMemory(
name=f"out-{self.name}", create=False
)
self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
def detect(self, tensor_input, threshold=0.4):
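A sketch of the zero-copy handoff the detector code above relies on: producer and consumer attach to the same shared-memory blocks and wrap them in numpy arrays without copying. Names and the input shape are illustrative; the `(20, 6)` float32 output layout matches this diff.

```python
import numpy as np
from multiprocessing import shared_memory

# input: one uint8 frame tensor; output: up to 20 detections of 6 floats each
in_shm = shared_memory.SharedMemory(name="front", create=True, size=1 * 320 * 320 * 3)
tensor_input = np.ndarray((1, 320, 320, 3), dtype=np.uint8, buffer=in_shm.buf)

out_shm = shared_memory.SharedMemory(name="out-front", create=True, size=20 * 6 * 4)
detections = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)

# a consumer attaches with create=False and sees the same memory
peer = shared_memory.SharedMemory(name="out-front", create=False)
peer_view = np.ndarray((20, 6), dtype=np.float32, buffer=peer.buf)
detections[0, 0] = 0.9
print(peer_view[0, 0])  # 0.9

for s in (peer, in_shm, out_shm):
    s.close()
in_shm.unlink()
out_shm.unlink()
```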


@@ -6,7 +6,7 @@ import queue
import threading
from collections import Counter, defaultdict
from multiprocessing.synchronize import Event as MpEvent
from typing import Callable, Optional
from typing import Callable
import cv2
import numpy as np
@@ -233,18 +233,17 @@ class CameraState:
def on(self, event_type: str, callback: Callable[[dict], None]):
self.callbacks[event_type].append(callback)
def update(
self,
frame_name: str,
frame_time: float,
current_detections: dict[str, dict[str, any]],
motion_boxes: list[tuple[int, int, int, int]],
regions: list[tuple[int, int, int, int]],
):
def update(self, frame_time, current_detections, motion_boxes, regions):
# get the new frame
frame_id = f"{self.name}{frame_time}"
current_frame = self.frame_manager.get(
frame_name, self.camera_config.frame_shape_yuv
frame_id, self.camera_config.frame_shape_yuv
)
if current_frame is None:
logger.debug(f"Failed to get frame {frame_id} from SHM")
tracked_objects = self.tracked_objects.copy()
current_ids = set(current_detections.keys())
previous_ids = set(tracked_objects.keys())
@@ -262,7 +261,7 @@ class CameraState:
# call event handlers
for c in self.callbacks["start"]:
c(self.name, new_obj, frame_name)
c(self.name, new_obj, frame_time)
for id in updated_ids:
updated_obj = tracked_objects[id]
@@ -272,7 +271,7 @@ class CameraState:
if autotracker_update or significant_update:
for c in self.callbacks["autotrack"]:
c(self.name, updated_obj, frame_name)
c(self.name, updated_obj, frame_time)
if thumb_update and current_frame is not None:
# ensure this frame is stored in the cache
@@ -293,7 +292,7 @@ class CameraState:
) or significant_update:
# call event handlers
for c in self.callbacks["update"]:
c(self.name, updated_obj, frame_name)
c(self.name, updated_obj, frame_time)
updated_obj.last_published = frame_time
for id in removed_ids:
@@ -302,7 +301,7 @@ class CameraState:
if "end_time" not in removed_obj.obj_data:
removed_obj.obj_data["end_time"] = frame_time
for c in self.callbacks["end"]:
c(self.name, removed_obj, frame_name)
c(self.name, removed_obj, frame_time)
# TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects
@@ -345,7 +344,6 @@ class CameraState:
# if the object's thumbnail is not from the current frame, skip
if (
current_frame is None
or obj.thumbnail_data is None
or obj.false_positive
or obj.thumbnail_data["frame_time"] != frame_time
):
@@ -368,11 +366,11 @@ class CameraState:
):
self.best_objects[object_type] = obj
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_name)
c(self.name, self.best_objects[object_type], frame_time)
else:
self.best_objects[object_type] = obj
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_name)
c(self.name, self.best_objects[object_type], frame_time)
for c in self.callbacks["camera_activity"]:
c(self.name, camera_activity)
@@ -447,7 +445,7 @@ class CameraState:
c(self.name, obj_name, 0)
self.active_object_counts[obj_name] = 0
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[obj_name], frame_name)
c(self.name, self.best_objects[obj_name], frame_time)
# cleanup thumbnail frame cache
current_thumb_frames = {
@@ -478,7 +476,7 @@ class CameraState:
if self.previous_frame_id is not None:
self.frame_manager.close(self.previous_frame_id)
self.previous_frame_id = frame_name
self.previous_frame_id = frame_id
class TrackedObjectProcessor(threading.Thread):
@@ -518,18 +516,17 @@ class TrackedObjectProcessor(threading.Thread):
self.zone_data = defaultdict(lambda: defaultdict(dict))
self.active_zone_data = defaultdict(lambda: defaultdict(dict))
def start(camera: str, obj: TrackedObject, frame_name: str):
def start(camera, obj: TrackedObject, current_frame_time):
self.event_sender.publish(
(
EventTypeEnum.tracked_object,
EventStateEnum.start,
camera,
frame_name,
obj.to_dict(),
)
)
def update(camera: str, obj: TrackedObject, frame_name: str):
def update(camera, obj: TrackedObject, current_frame_time):
obj.has_snapshot = self.should_save_snapshot(camera, obj)
obj.has_clip = self.should_retain_recording(camera, obj)
after = obj.to_dict()
@@ -545,15 +542,14 @@ class TrackedObjectProcessor(threading.Thread):
EventTypeEnum.tracked_object,
EventStateEnum.update,
camera,
frame_name,
obj.to_dict(include_thumbnail=True),
)
)
def autotrack(camera: str, obj: TrackedObject, frame_name: str):
def autotrack(camera, obj: TrackedObject, current_frame_time):
self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
def end(camera: str, obj: TrackedObject, frame_name: str):
def end(camera, obj: TrackedObject, current_frame_time):
# populate has_snapshot
obj.has_snapshot = self.should_save_snapshot(camera, obj)
obj.has_clip = self.should_retain_recording(camera, obj)
@@ -608,12 +604,11 @@ class TrackedObjectProcessor(threading.Thread):
EventTypeEnum.tracked_object,
EventStateEnum.end,
camera,
frame_name,
obj.to_dict(include_thumbnail=True),
)
)
def snapshot(camera, obj: TrackedObject, frame_name: str):
def snapshot(camera, obj: TrackedObject, current_frame_time):
mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
@@ -718,8 +713,7 @@ class TrackedObjectProcessor(threading.Thread):
)
and (
not review_config.detections.required_zones
or set(obj.entered_zones)
& set(review_config.detections.required_zones)
or set(obj.entered_zones) & set(review_config.alerts.required_zones)
)
)
):
@@ -784,18 +778,13 @@ class TrackedObjectProcessor(threading.Thread):
else:
return {}
def get_current_frame(
self, camera: str, draw_options: dict[str, any] = {}
) -> Optional[np.ndarray]:
def get_current_frame(self, camera, draw_options={}):
if camera == "birdseye":
return self.frame_manager.get(
"birdseye",
(self.config.birdseye.height * 3 // 2, self.config.birdseye.width),
)
if camera not in self.camera_states:
return None
return self.camera_states[camera].get_current_frame(draw_options)
def get_current_frame_time(self, camera) -> int:
@@ -807,7 +796,6 @@ class TrackedObjectProcessor(threading.Thread):
try:
(
camera,
frame_name,
frame_time,
current_tracked_objects,
motion_boxes,
@@ -819,7 +807,7 @@ class TrackedObjectProcessor(threading.Thread):
camera_state = self.camera_states[camera]
camera_state.update(
frame_name, frame_time, current_tracked_objects, motion_boxes, regions
frame_time, current_tracked_objects, motion_boxes, regions
)
self.update_mqtt_motion(camera, frame_time, motion_boxes)
@@ -832,7 +820,6 @@ class TrackedObjectProcessor(threading.Thread):
self.detection_publisher.publish(
(
camera,
frame_name,
frame_time,
tracked_objects,
motion_boxes,
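The `CameraState.on()` plumbing above is a plain callback registry; a minimal sketch of the pattern:

```python
from collections import defaultdict
from typing import Any, Callable

callbacks: dict[str, list[Callable[..., None]]] = defaultdict(list)


def on(event_type: str, callback: Callable[..., None]) -> None:
    callbacks[event_type].append(callback)


def emit(event_type: str, *args: Any) -> None:
    for c in callbacks[event_type]:
        c(*args)


on("start", lambda camera, obj, frame_time: print("start", camera, obj["id"], frame_time))
emit("start", "front_door", {"id": "evt-1"}, 1700000000.0)
```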


@@ -268,10 +268,12 @@ class BirdsEyeFrameManager:
def __init__(
self,
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
stop_event: mp.Event,
):
self.config = config
self.mode = config.birdseye.mode
self.frame_manager = frame_manager
width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height)
self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width)
@@ -349,13 +351,18 @@ class BirdsEyeFrameManager:
logger.debug("Clearing the birdseye frame")
self.frame[:] = self.blank_frame
def copy_to_position(self, position, camera=None, frame: np.ndarray = None):
def copy_to_position(self, position, camera=None, frame_time=None):
if camera is None:
frame = None
channel_dims = None
else:
frame_id = f"{camera}{frame_time}"
frame = self.frame_manager.get(
frame_id, self.config.cameras[camera].frame_shape_yuv
)
if frame is None:
logger.debug(f"Unable to copy frame {camera} to birdseye.")
logger.debug(f"Unable to copy frame {camera}{frame_time} to birdseye.")
return
channel_dims = self.cameras[camera]["channel_dims"]
@@ -368,6 +375,8 @@ class BirdsEyeFrameManager:
channel_dims,
)
self.frame_manager.close(frame_id)
def camera_active(self, mode, object_box_count, motion_box_count):
if mode == BirdseyeModeEnum.continuous:
return True
@@ -378,7 +387,7 @@ class BirdsEyeFrameManager:
if mode == BirdseyeModeEnum.objects and object_box_count > 0:
return True
def update_frame(self, frame: np.ndarray):
def update_frame(self):
"""Update to a new frame for birdseye."""
# determine how many cameras are tracking objects within the last inactivity_threshold seconds
@@ -388,7 +397,7 @@ class BirdsEyeFrameManager:
for cam, cam_data in self.cameras.items()
if self.config.cameras[cam].birdseye.enabled
and cam_data["last_active_frame"] > 0
and cam_data["current_frame_time"] - cam_data["last_active_frame"]
and cam_data["current_frame"] - cam_data["last_active_frame"]
< self.inactivity_threshold
]
)
@@ -405,7 +414,7 @@ class BirdsEyeFrameManager:
limited_active_cameras = sorted(
active_cameras,
key=lambda active_camera: (
self.cameras[active_camera]["current_frame_time"]
self.cameras[active_camera]["current_frame"]
- self.cameras[active_camera]["last_active_frame"]
),
)
@@ -515,9 +524,7 @@ class BirdsEyeFrameManager:
for row in self.camera_layout:
for position in row:
self.copy_to_position(
position[1],
position[0],
self.cameras[position[0]]["current_frame"],
position[1], position[0], self.cameras[position[0]]["current_frame"]
)
return True
@@ -665,14 +672,7 @@ class BirdsEyeFrameManager:
else:
return standard_candidate_layout
def update(
self,
camera: str,
object_count: int,
motion_count: int,
frame_time: float,
frame: np.ndarray,
) -> bool:
def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
# don't process if birdseye is disabled for this camera
camera_config = self.config.cameras[camera].birdseye
@@ -689,8 +689,7 @@ class BirdsEyeFrameManager:
return False
# update the last active frame for the camera
self.cameras[camera]["current_frame"] = frame.copy()
self.cameras[camera]["current_frame_time"] = frame_time
self.cameras[camera]["current_frame"] = frame_time
if self.camera_active(camera_config.mode, object_count, motion_count):
self.cameras[camera]["last_active_frame"] = frame_time
@@ -701,7 +700,7 @@ class BirdsEyeFrameManager:
return False
try:
updated_frame = self.update_frame(frame)
updated_frame = self.update_frame()
except Exception:
updated_frame = False
self.active_cameras = []
@@ -738,12 +737,12 @@ class Birdseye:
self.broadcaster = BroadcastThread(
"birdseye", self.converter, websocket_server, stop_event
)
self.birdseye_manager = BirdsEyeFrameManager(config, stop_event)
frame_manager = SharedMemoryFrameManager()
self.birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
self.config_subscriber = ConfigSubscriber("config/birdseye/")
self.frame_manager = SharedMemoryFrameManager()
if config.birdseye.restream:
self.birdseye_buffer = self.frame_manager.create(
self.birdseye_buffer = frame_manager.create(
"birdseye",
self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1],
)
@@ -757,7 +756,7 @@ class Birdseye:
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame: np.ndarray,
frame,
) -> None:
# check if there is an updated config
while True:
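The `yuv_shape` math above reflects how a YUV 4:2:0 frame is stored: a single 2D uint8 array holding the full-resolution Y plane plus half-resolution chroma, hence `height * 3 // 2` rows.

```python
import numpy as np

width, height = 1280, 720
yuv_shape = (height * 3 // 2, width)  # (1080, 1280)
frame = np.zeros(yuv_shape, dtype=np.uint8)
print(frame.nbytes)  # 1.5 bytes per pixel: 1382400
```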


@@ -63,7 +63,6 @@ def output_frames(
birdseye: Optional[Birdseye] = None
preview_recorders: dict[str, PreviewRecorder] = {}
preview_write_times: dict[str, float] = {}
failed_frame_requests: dict[str, int] = {}
move_preview_frames("cache")
@@ -88,27 +87,19 @@ def output_frames(
(
camera,
frame_name,
frame_time,
current_tracked_objects,
motion_boxes,
_,
regions,
) = data
frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
if frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1
if failed_frame_requests[camera] > config.cameras[camera].detect.fps:
logger.warning(
f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues."
)
logger.debug(f"Failed to get frame {frame_id} from SHM")
continue
else:
failed_frame_requests[camera] = 0
# send camera frame to ffmpeg process if websockets are connected
if any(
@@ -143,15 +134,12 @@ def output_frames(
# check for any cameras that are currently offline
# and need to generate a preview
if generated_preview:
logger.debug(
"Checking for offline cameras because another camera generated a preview."
)
for camera, time in preview_write_times.copy().items():
if time != 0 and frame_time - time > 10:
preview_recorders[camera].flag_offline(frame_time)
preview_write_times[camera] = frame_time
frame_manager.close(frame_name)
frame_manager.close(frame_id)
move_preview_frames("clips")
@@ -163,15 +151,15 @@ def output_frames(
(
camera,
frame_name,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = data
frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
frame_manager.close(frame_name)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.close(frame_id)
detection_subscriber.stop()


@@ -78,7 +78,7 @@ class FFMpegConverter(threading.Thread):
# write a PREVIEW at fps and 1 key frame per clip
self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
config.ffmpeg.ffmpeg_path,
"default",
config.ffmpeg.hwaccel_args,
input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin",
output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
type=EncodeTypeEnum.preview,
@@ -154,7 +154,6 @@ class PreviewRecorder:
self.start_time = 0
self.last_output_time = 0
self.output_frames = []
if config.detect.width > config.detect.height:
self.out_height = PREVIEW_HEIGHT
self.out_width = (
@@ -275,7 +274,7 @@ class PreviewRecorder:
return False
def write_frame_to_cache(self, frame_time: float, frame: np.ndarray) -> None:
def write_frame_to_cache(self, frame_time: float, frame) -> None:
# resize yuv frame
small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8)
copy_yuv_to_position(
@@ -304,7 +303,7 @@ class PreviewRecorder:
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame: np.ndarray,
frame,
) -> bool:
# check for updated record config
_, updated_record_config = self.config_subscriber.check_for_update()
@@ -333,10 +332,6 @@ class PreviewRecorder:
self.output_frames,
self.requestor,
).start()
else:
logger.debug(
f"Not saving preview for {self.config.name} because there are no saved frames."
)
# reset frame cache
self.segment_end = (


@@ -59,13 +59,7 @@ class PtzMotionEstimator:
self.ptz_metrics.reset.set()
logger.debug(f"{config.name}: Motion estimator init")
def motion_estimator(
self,
detections: list[dict[str, any]],
frame_name: str,
frame_time: float,
camera: str,
):
def motion_estimator(self, detections, frame_time, camera):
# If we've just started up or returned to our preset, reset motion estimator for new tracking session
if self.ptz_metrics.reset.is_set():
self.ptz_metrics.reset.clear()
@@ -98,8 +92,9 @@ class PtzMotionEstimator:
f"{camera}: Motion estimator running - frame time: {frame_time}"
)
frame_id = f"{camera}{frame_time}"
yuv_frame = self.frame_manager.get(
frame_name, self.camera_config.frame_shape_yuv
frame_id, self.camera_config.frame_shape_yuv
)
if yuv_frame is None:
@@ -141,7 +136,7 @@ class PtzMotionEstimator:
except Exception:
pass
self.frame_manager.close(frame_name)
self.frame_manager.close(frame_id)
return self.coord_transformations


@@ -27,7 +27,6 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Export, Previews, Recordings
from frigate.util.builtin import is_current_hour
logger = logging.getLogger(__name__)
@@ -44,11 +43,6 @@ class PlaybackFactorEnum(str, Enum):
timelapse_25x = "timelapse_25x"
class PlaybackSourceEnum(str, Enum):
recordings = "recordings"
preview = "preview"
class RecordingExporter(threading.Thread):
"""Exports a specific set of recordings for a camera to storage as a single file."""
@@ -62,7 +56,6 @@ class RecordingExporter(threading.Thread):
start_time: int,
end_time: int,
playback_factor: PlaybackFactorEnum,
playback_source: PlaybackSourceEnum,
) -> None:
super().__init__()
self.config = config
@@ -73,7 +66,6 @@ class RecordingExporter(threading.Thread):
self.start_time = start_time
self.end_time = end_time
self.playback_factor = playback_factor
self.playback_source = playback_source
# ensure export thumb dir
Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)
@@ -178,7 +170,30 @@ class RecordingExporter(threading.Thread):
return thumb_path
def get_record_export_command(self, video_path: str) -> list[str]:
def run(self) -> None:
logger.debug(
f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
)
export_name = (
self.user_provided_name
or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
)
video_path = f"{EXPORT_DIR}/{self.export_id}.mp4"
thumb_path = self.save_thumbnail(self.export_id)
Export.insert(
{
Export.id: self.export_id,
Export.camera: self.camera,
Export.name: export_name,
Export.date: self.start_time,
Export.video_path: video_path,
Export.thumb_path: thumb_path,
Export.in_progress: True,
}
).execute()
if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS:
playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8"
ffmpeg_input = (
@@ -189,10 +204,7 @@ class RecordingExporter(threading.Thread):
# get full set of recordings
export_recordings = (
Recordings.select(
Recordings.start_time,
Recordings.end_time,
)
Recordings.select()
.where(
Recordings.start_time.between(self.start_time, self.end_time)
| Recordings.end_time.between(self.start_time, self.end_time)
@@ -221,91 +233,6 @@ class RecordingExporter(threading.Thread):
ffmpeg_cmd = (
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
).split(" ")
elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
ffmpeg_cmd = (
parse_preset_hardware_acceleration_encode(
self.config.ffmpeg.ffmpeg_path,
self.config.ffmpeg.hwaccel_args,
f"-an {ffmpeg_input}",
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
EncodeTypeEnum.timelapse,
)
).split(" ")
return ffmpeg_cmd, playlist_lines
def get_preview_export_command(self, video_path: str) -> list[str]:
playlist_lines = []
codec = "-c copy"
if is_current_hour(self.start_time):
# get list of current preview frames
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{self.camera}"
start_file = f"{file_start}-{self.start_time}.{PREVIEW_FRAME_TYPE}"
end_file = f"{file_start}-{self.end_time}.{PREVIEW_FRAME_TYPE}"
for file in sorted(os.listdir(preview_dir)):
if not file.startswith(file_start):
continue
if file < start_file:
continue
if file > end_file:
break
playlist_lines.append(f"file '{os.path.join(preview_dir, file)}'")
playlist_lines.append("duration 0.12")
if playlist_lines:
last_file = playlist_lines[-2]
playlist_lines.append(last_file)
codec = "-c:v libx264"
# get full set of previews
export_previews = (
Previews.select(
Previews.path,
Previews.start_time,
Previews.end_time,
)
.where(
Previews.start_time.between(self.start_time, self.end_time)
| Previews.end_time.between(self.start_time, self.end_time)
| (
(self.start_time > Previews.start_time)
& (self.end_time < Previews.end_time)
)
)
.where(Previews.camera == self.camera)
.order_by(Previews.start_time.asc())
.namedtuples()
.iterator()
)
preview: Previews
for preview in export_previews:
playlist_lines.append(f"file '{preview.path}'")
if preview.start_time < self.start_time:
playlist_lines.append(
f"inpoint {int(self.start_time - preview.start_time)}"
)
if preview.end_time > self.end_time:
playlist_lines.append(
f"outpoint {int(preview.end_time - self.end_time)}"
)
ffmpeg_input = (
"-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin"
)
if self.playback_factor == PlaybackFactorEnum.realtime:
ffmpeg_cmd = (
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}"
).split(" ")
elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
ffmpeg_cmd = (
parse_preset_hardware_acceleration_encode(
@@ -317,36 +244,6 @@ class RecordingExporter(threading.Thread):
)
).split(" ")
return ffmpeg_cmd, playlist_lines
def run(self) -> None:
logger.debug(
f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
)
export_name = (
self.user_provided_name
or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
)
video_path = f"{EXPORT_DIR}/{self.export_id}.mp4"
thumb_path = self.save_thumbnail(self.export_id)
Export.insert(
{
Export.id: self.export_id,
Export.camera: self.camera,
Export.name: export_name,
Export.date: self.start_time,
Export.video_path: video_path,
Export.thumb_path: thumb_path,
Export.in_progress: True,
}
).execute()
if self.playback_source == PlaybackSourceEnum.recordings:
ffmpeg_cmd, playlist_lines = self.get_record_export_command(video_path)
else:
ffmpeg_cmd, playlist_lines = self.get_preview_export_command(video_path)
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
@@ -357,7 +254,7 @@ class RecordingExporter(threading.Thread):
if p.returncode != 0:
logger.error(
f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}"
f"Failed to export recording for command {' '.join(ffmpeg_cmd)}"
)
logger.error(p.stderr)
Path(video_path).unlink(missing_ok=True)
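The export path above feeds ffmpeg a concat demuxer playlist over stdin; a runnable sketch with illustrative paths (`inpoint`/`outpoint` trim the first and last segments, as in the preview export code above):

```python
import subprocess as sp

playlist_lines = [
    "file '/media/frigate/previews/front_door-1700000000.mp4'",  # illustrative paths
    "inpoint 12",
    "file '/media/frigate/previews/front_door-1700003600.mp4'",
    "outpoint 45",
]
ffmpeg_cmd = (
    "ffmpeg -hide_banner -y -protocol_whitelist pipe,file,tcp "
    "-f concat -safe 0 -i /dev/stdin -c copy -movflags +faststart /tmp/export.mp4"
).split(" ")
p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding="ascii", capture_output=True)
print(p.returncode)
```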


@@ -299,12 +299,16 @@ class RecordingMaintainer(threading.Thread):
# if it doesn't overlap with an event, go ahead and drop the segment
# if it ends more than the configured pre_capture for the camera
else:
pre_capture = max(
record_config.alerts.pre_capture,
record_config.detections.pre_capture,
)
camera_info = self.object_recordings_info[camera]
most_recently_processed_frame_time = (
camera_info[-1][0] if len(camera_info) > 0 else 0
)
retain_cutoff = datetime.datetime.fromtimestamp(
most_recently_processed_frame_time - record_config.event_pre_capture
most_recently_processed_frame_time - pre_capture
).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff:
Path(cache_path).unlink(missing_ok=True)
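The cutoff computed above, in isolation: a cache segment is kept only if it ends after the most recently processed frame time minus the larger of the two pre-capture windows (all values here are illustrative).

```python
import datetime

alerts_pre_capture, detections_pre_capture = 5, 10  # seconds
most_recently_processed_frame_time = 1_700_000_000.0

pre_capture = max(alerts_pre_capture, detections_pre_capture)
retain_cutoff = datetime.datetime.fromtimestamp(
    most_recently_processed_frame_time - pre_capture
).astimezone(datetime.timezone.utc)
print(retain_cutoff.isoformat())
```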
@@ -514,7 +518,6 @@ class RecordingMaintainer(threading.Thread):
if topic == DetectionTypeEnum.video:
(
camera,
_,
frame_time,
current_tracked_objects,
motion_boxes,


@@ -51,7 +51,7 @@ class PendingReviewSegment:
frame_time: float,
severity: SeverityEnum,
detections: dict[str, str],
sub_labels: dict[str, str],
sub_labels: set[str],
zones: list[str],
audio: set[str],
):
@@ -135,7 +135,7 @@ class PendingReviewSegment:
ReviewSegment.data.name: {
"detections": list(set(self.detections.keys())),
"objects": list(set(self.detections.values())),
"sub_labels": list(self.sub_labels.values()),
"sub_labels": list(self.sub_labels),
"zones": self.zones,
"audio": list(self.audio),
},
@@ -234,7 +234,6 @@ class ReviewSegmentMaintainer(threading.Thread):
def update_existing_segment(
self,
segment: PendingReviewSegment,
frame_name: str,
frame_time: float,
objects: list[TrackedObject],
) -> None:
@@ -262,7 +261,7 @@ class ReviewSegmentMaintainer(threading.Thread):
segment.detections[object["id"]] = object["sub_label"][0]
else:
segment.detections[object["id"]] = f'{object["label"]}-verified'
segment.sub_labels[object["id"]] = object["sub_label"][0]
segment.sub_labels.add(object["sub_label"][0])
# if object is alert label
# and has entered required zones or required zones is not set
@@ -293,34 +292,36 @@ class ReviewSegmentMaintainer(threading.Thread):
if should_update:
try:
frame_id = f"{camera_config.name}{frame_time}"
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
frame_id, camera_config.frame_shape_yuv
)
if yuv_frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
logger.debug(f"Failed to get frame {frame_id} from SHM")
return
self._publish_segment_update(
segment, camera_config, yuv_frame, active_objects, prev_data
)
self.frame_manager.close(frame_name)
self.frame_manager.close(frame_id)
except FileNotFoundError:
return
if not has_activity:
if not segment.has_frame:
try:
frame_id = f"{camera_config.name}{frame_time}"
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
frame_id, camera_config.frame_shape_yuv
)
if yuv_frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
logger.debug(f"Failed to get frame {frame_id} from SHM")
return
segment.save_full_frame(camera_config, yuv_frame)
self.frame_manager.close(frame_name)
self.frame_manager.close(frame_id)
self._publish_segment_update(
segment, camera_config, None, [], prev_data
)
@@ -337,7 +338,6 @@ class ReviewSegmentMaintainer(threading.Thread):
def check_if_new_segment(
self,
camera: str,
frame_name: str,
frame_time: float,
objects: list[TrackedObject],
) -> None:
@@ -347,7 +347,7 @@ class ReviewSegmentMaintainer(threading.Thread):
if len(active_objects) > 0:
detections: dict[str, str] = {}
sub_labels: dict[str, str] = {}
sub_labels = set()
zones: list[str] = []
severity = None
@@ -358,7 +358,7 @@ class ReviewSegmentMaintainer(threading.Thread):
detections[object["id"]] = object["sub_label"][0]
else:
detections[object["id"]] = f'{object["label"]}-verified'
sub_labels[object["id"]] = object["sub_label"][0]
sub_labels.add(object["sub_label"][0])
# if object is alert label
# and has entered required zones or required zones is not set
@@ -414,18 +414,19 @@ class ReviewSegmentMaintainer(threading.Thread):
)
try:
frame_id = f"{camera_config.name}{frame_time}"
yuv_frame = self.frame_manager.get(
frame_name, camera_config.frame_shape_yuv
frame_id, camera_config.frame_shape_yuv
)
if yuv_frame is None:
logger.debug(f"Failed to get frame {frame_name} from SHM")
logger.debug(f"Failed to get frame {frame_id} from SHM")
return
self.active_review_segments[camera].update_frame(
camera_config, yuv_frame, active_objects
)
self.frame_manager.close(frame_name)
self.frame_manager.close(frame_id)
self._publish_segment_start(self.active_review_segments[camera])
except FileNotFoundError:
return
@@ -453,17 +454,16 @@ class ReviewSegmentMaintainer(threading.Thread):
if topic == DetectionTypeEnum.video:
(
camera,
frame_name,
frame_time,
current_tracked_objects,
_,
_,
motion_boxes,
regions,
) = data
elif topic == DetectionTypeEnum.audio:
(
camera,
frame_time,
_,
dBFS,
audio_detections,
) = data
elif topic == DetectionTypeEnum.api:
@@ -480,9 +480,7 @@ class ReviewSegmentMaintainer(threading.Thread):
if not self.config.cameras[camera].record.enabled:
if current_segment:
self.update_existing_segment(
current_segment, frame_name, frame_time, []
)
self.update_existing_segment(current_segment, frame_time, [])
continue
@@ -490,7 +488,6 @@ class ReviewSegmentMaintainer(threading.Thread):
if topic == DetectionTypeEnum.video:
self.update_existing_segment(
current_segment,
frame_name,
frame_time,
current_tracked_objects,
)
@@ -541,7 +538,6 @@ class ReviewSegmentMaintainer(threading.Thread):
if topic == DetectionTypeEnum.video:
self.check_if_new_segment(
camera,
frame_name,
frame_time,
current_tracked_objects,
)
@@ -570,7 +566,7 @@ class ReviewSegmentMaintainer(threading.Thread):
frame_time,
severity,
{},
{},
set(),
[],
detections,
)
@@ -580,7 +576,7 @@ class ReviewSegmentMaintainer(threading.Thread):
frame_time,
SeverityEnum.alert,
{manual_info["event_id"]: manual_info["label"]},
{},
set(),
[],
set(),
)


@@ -1,4 +0,0 @@
from .multiprocessing import ServiceProcess
from .service import Service, ServiceManager
__all__ = ["Service", "ServiceProcess", "ServiceManager"]


@@ -1,164 +0,0 @@
import asyncio
import faulthandler
import logging
import multiprocessing as mp
import signal
import sys
import threading
from abc import ABC, abstractmethod
from asyncio.exceptions import TimeoutError
from logging.handlers import QueueHandler
from types import FrameType
from typing import Optional
import frigate.log
from .multiprocessing_waiter import wait as mp_wait
from .service import Service, ServiceManager
DEFAULT_STOP_TIMEOUT = 10 # seconds
class BaseServiceProcess(Service, ABC):
"""A Service the manages a multiprocessing.Process."""
_process: Optional[mp.Process]
def __init__(
self,
*,
name: Optional[str] = None,
manager: Optional[ServiceManager] = None,
) -> None:
super().__init__(name=name, manager=manager)
self._process = None
async def on_start(self) -> None:
if self._process is not None:
if self._process.is_alive():
return # Already started.
else:
self._process.close()
# At this point, the process is either stopped or dead, so we can recreate it.
self._process = mp.Process(target=self._run)
self._process.name = self.name
self._process.daemon = True
self.before_start()
self._process.start()
self.after_start()
self.manager.logger.info(f"Started {self.name} (pid: {self._process.pid})")
async def on_stop(
self,
*,
force: bool = False,
timeout: Optional[float] = None,
) -> None:
if timeout is None:
timeout = DEFAULT_STOP_TIMEOUT
if self._process is None:
return # Already stopped.
running = True
if not force:
self._process.terminate()
try:
await asyncio.wait_for(mp_wait(self._process), timeout)
running = False
except TimeoutError:
self.manager.logger.warning(
f"{self.name} is still running after "
f"{timeout} seconds. Killing."
)
if running:
self._process.kill()
await mp_wait(self._process)
self._process.close()
self._process = None
self.manager.logger.info(f"{self.name} stopped")
@property
def pid(self) -> Optional[int]:
return self._process.pid if self._process else None
def _run(self) -> None:
self.before_run()
self.run()
self.after_run()
def before_start(self) -> None:
pass
def after_start(self) -> None:
pass
def before_run(self) -> None:
pass
def after_run(self) -> None:
pass
@abstractmethod
def run(self) -> None:
pass
def __getstate__(self) -> dict:
return {
k: v
for k, v in self.__dict__.items()
if not (k.startswith("_Service__") or k == "_process")
}
class ServiceProcess(BaseServiceProcess):
logger: logging.Logger
@property
def stop_event(self) -> threading.Event:
# Lazily create the stop_event. This allows the signal handler to tell if anyone is
# monitoring the stop event, and to raise a SystemExit if not.
if "stop_event" not in self.__dict__:
stop_event = threading.Event()
self.__dict__["stop_event"] = stop_event
else:
stop_event = self.__dict__["stop_event"]
assert isinstance(stop_event, threading.Event)
return stop_event
def before_start(self) -> None:
if frigate.log.log_listener is None:
raise RuntimeError("Logging has not yet been set up.")
self.__log_queue = frigate.log.log_listener.queue
def before_run(self) -> None:
super().before_run()
faulthandler.enable()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
# Get the stop_event through the dict to bypass lazy initialization.
stop_event = self.__dict__.get("stop_event")
if stop_event is not None:
# Someone is monitoring stop_event. We should set it.
stop_event.set()
else:
# Nobody is monitoring stop_event. We should raise SystemExit.
sys.exit()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
self.logger = logging.getLogger(self.name)
logging.basicConfig(handlers=[], force=True)
logging.getLogger().addHandler(QueueHandler(self.__log_queue))
del self.__log_queue
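The child-process logging setup above, reduced to its essentials: wipe the inherited handlers and route every record through a QueueHandler back to a listener in the parent.

```python
import logging
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue

log_queue: Queue = Queue()
listener = QueueListener(log_queue, logging.StreamHandler())
listener.start()

# in the child: drop inherited handlers, then forward everything to the queue
logging.basicConfig(handlers=[], force=True)
logging.getLogger().addHandler(QueueHandler(log_queue))
logging.getLogger("frigate.audio_manager").warning("hello from the child")

listener.stop()
```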


@@ -1,150 +0,0 @@
import asyncio
import functools
import logging
import multiprocessing as mp
import queue
import threading
from multiprocessing.connection import Connection
from multiprocessing.connection import wait as mp_wait
from socket import socket
from typing import Any, Optional, Union
logger = logging.getLogger(__name__)
class MultiprocessingWaiter(threading.Thread):
"""A background thread that manages futures for the multiprocessing.connection.wait() method."""
def __init__(self) -> None:
super().__init__(daemon=True)
# Queue of objects to wait for and futures to set results for.
self._queue: queue.Queue[tuple[Any, asyncio.Future[None]]] = queue.Queue()
# This is required to get mp_wait() to wake up when new objects to wait for are received.
receive, send = mp.Pipe(duplex=False)
self._receive_connection = receive
self._send_connection = send
def wait_for_sentinel(self, sentinel: Any) -> asyncio.Future[None]:
"""Create an asyncio.Future tracking a sentinel for multiprocessing.connection.wait()
Warning: This method is NOT thread-safe.
"""
# This would be incredibly stupid, but you never know.
assert sentinel != self._receive_connection
# Send the future to the background thread for processing.
future = asyncio.get_running_loop().create_future()
self._queue.put((sentinel, future))
# Notify the background thread.
#
# This is the non-thread-safe part, but since this method is not really meant to be called
# by users, we can get away with not adding a lock at this point (to avoid adding 2 locks).
self._send_connection.send_bytes(b".")
return future
def run(self) -> None:
logger.debug("Started background thread")
wait_dict: dict[Any, set[asyncio.Future[None]]] = {
self._receive_connection: set()
}
while True:
for ready_obj in mp_wait(wait_dict.keys()):
# Make sure we never remove the receive connection from the wait dict
if ready_obj is self._receive_connection:
continue
logger.debug(
f"Sentinel {ready_obj!r} is ready. "
f"Notifying {len(wait_dict[ready_obj])} future(s)."
)
# Go over all the futures attached to this object and mark them as ready.
for fut in wait_dict.pop(ready_obj):
if fut.cancelled():
logger.debug(
f"A future for sentinel {ready_obj!r} is ready, "
"but the future is cancelled. Skipping."
)
else:
fut.get_loop().call_soon_threadsafe(
# Note: We need to check fut.cancelled() again, since it might
# have been set before the event loop's definition of "soon".
functools.partial(
lambda fut: fut.cancelled() or fut.set_result(None), fut
)
)
# Check for cancellations in the remaining futures.
done_objects = []
for obj, fut_set in wait_dict.items():
if obj is self._receive_connection:
continue
# Find any cancelled futures and remove them.
cancelled = [fut for fut in fut_set if fut.cancelled()]
fut_set.difference_update(cancelled)
logger.debug(
f"Removing {len(cancelled)} future(s) from sentinel: {obj!r}"
)
# Mark objects with no remaining futures for removal.
if len(fut_set) == 0:
done_objects.append(obj)
# Remove any objects that are done after removing cancelled futures.
for obj in done_objects:
logger.debug(
f"Sentinel {obj!r} no longer has any futures waiting for it."
)
del wait_dict[obj]
# Get new objects to wait for from the queue.
while True:
try:
obj, fut = self._queue.get_nowait()
self._receive_connection.recv_bytes(maxlength=1)
self._queue.task_done()
logger.debug(f"Received new sentinel: {obj!r}")
wait_dict.setdefault(obj, set()).add(fut)
except queue.Empty:
break
waiter_lock = threading.Lock()
waiter_thread: Optional[MultiprocessingWaiter] = None
async def wait(object: Union[mp.Process, Connection, socket]) -> None:
"""Wait for the supplied object to be ready.
Under the hood, this uses multiprocessing.connection.wait() and a background thread to manage the
returned futures.
"""
global waiter_thread, waiter_lock
sentinel: Union[Connection, socket, int]
if isinstance(object, mp.Process):
sentinel = object.sentinel
elif isinstance(object, Connection) or isinstance(object, socket):
sentinel = object
else:
raise ValueError(f"Cannot wait for object of type {type(object).__qualname__}")
with waiter_lock:
if waiter_thread is None:
# Start a new waiter thread.
waiter_thread = MultiprocessingWaiter()
waiter_thread.start()
# Create the future while still holding the lock,
# since wait_for_sentinel() is not thread safe.
fut = waiter_thread.wait_for_sentinel(sentinel)
await fut
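Hypothetical usage of the `wait()` helper above — awaiting a child process exit without blocking the event loop:

```python
import asyncio
import multiprocessing as mp
import time


def work() -> None:
    time.sleep(1)


async def main() -> None:
    proc = mp.Process(target=work)
    proc.start()
    await wait(proc)  # resolves once proc.sentinel becomes ready
    print("exit code:", proc.exitcode)


if __name__ == "__main__":
    asyncio.run(main())
```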


@@ -1,446 +0,0 @@
from __future__ import annotations
import asyncio
import atexit
import logging
import threading
from abc import ABC, abstractmethod
from contextvars import ContextVar
from dataclasses import dataclass
from functools import partial
from typing import Coroutine, Optional, Union, cast
from typing_extensions import Self
class Service(ABC):
"""An abstract service instance."""
def __init__(
self,
*,
name: Optional[str] = None,
manager: Optional[ServiceManager] = None,
):
if name:
self.__dict__["name"] = name
self.__manager = manager or ServiceManager.current()
self.__lock = asyncio.Lock(loop=self.__manager._event_loop)
self.__manager._register(self)
@property
def name(self) -> str:
try:
return cast(str, self.__dict__["name"])
except KeyError:
return type(self).__qualname__
@property
def manager(self) -> ServiceManager:
"""The service manager this service is registered with."""
try:
return self.__manager
except AttributeError:
raise RuntimeError("Cannot access associated service manager")
def start(
self,
*,
wait: bool = False,
wait_timeout: Optional[float] = None,
) -> Self:
"""Start this service.
:param wait: If set, this function will block until the task is complete.
:param wait_timeout: If set, this function will not return until the task is complete or the
specified timeout has elapsed.
"""
self.manager.run_task(
self.on_start(),
wait=wait,
wait_timeout=wait_timeout,
lock=self.__lock,
)
return self
def stop(
self,
*,
force: bool = False,
timeout: Optional[float] = None,
wait: bool = False,
wait_timeout: Optional[float] = None,
) -> Self:
"""Stop this service.
:param force: If set, the service will be killed immediately.
:param timeout: Maximum amount of time to wait before force-killing the service.
:param wait: If set, this function will block until the task is complete.
:param wait_timeout: If set, this function will not return until the task is complete or the
specified timeout has elapsed.
"""
self.manager.run_task(
self.on_stop(force=force, timeout=timeout),
wait=wait,
wait_timeout=wait_timeout,
lock=self.__lock,
)
return self
def restart(
self,
*,
force: bool = False,
stop_timeout: Optional[float] = None,
wait: bool = False,
wait_timeout: Optional[float] = None,
) -> Self:
"""Restart this service.
:param force: If set, the service will be killed immediately.
:param stop_timeout: Maximum amount of time to wait before force-killing the service.
:param wait: If set, this function will block until the task is complete.
:param wait_timeout: If set, this function will not return until the task is complete or the
specified timeout has elapsed.
"""
self.manager.run_task(
self.on_restart(force=force, stop_timeout=stop_timeout),
wait=wait,
wait_timeout=wait_timeout,
lock=self.__lock,
)
return self
@abstractmethod
async def on_start(self) -> None:
pass
@abstractmethod
async def on_stop(
self,
*,
force: bool = False,
timeout: Optional[float] = None,
) -> None:
pass
async def on_restart(
self,
*,
force: bool = False,
stop_timeout: Optional[float] = None,
) -> None:
await self.on_stop(force=force, timeout=stop_timeout)
await self.on_start()
default_service_manager_lock = threading.Lock()
default_service_manager: Optional[ServiceManager] = None
current_service_manager: ContextVar[ServiceManager] = ContextVar(
"current_service_manager"
)
@dataclass
class Command:
"""A coroutine to execute in the service manager thread.
Attributes:
coro: The coroutine to execute.
lock: An async lock to acquire before calling the coroutine.
done: If specified, the service manager will set this event after the command completes.
"""
coro: Coroutine
lock: Optional[asyncio.Lock] = None
done: Optional[threading.Event] = None
class ServiceManager:
"""A set of services, along with the global state required to manage them efficiently.
Typically users of the service infrastructure will not interact with a service manager directly,
but rather through individual Service subclasses that will automatically manage a service
manager instance.
Each service manager instance has a background thread in which service lifecycle tasks are
executed in an async executor. This is done to avoid head-of-line blocking in the business logic
that spins up individual services. This thread is automatically started when the service manager
is created and stopped either manually, or on application exit.
All (public) service manager methods are thread-safe.
"""
_name: str
_logger: logging.Logger
# The set of services this service manager knows about.
_services: dict[str, Service]
_services_lock: threading.Lock
# Commands will be queued with associated event loop. Queueing `None` signals shutdown.
_command_queue: asyncio.Queue[Union[Command, None]]
_event_loop: asyncio.AbstractEventLoop
# The pending command counter is used to ensure all commands have been queued before shutdown.
_pending_commands: AtomicCounter
# The set of pending tasks after they have been received by the background thread and spawned.
_tasks: set
# Fired after the async runtime starts. Object initialization completes after this is set.
_setup_event: threading.Event
# Will be acquired to ensure the shutdown sentinel is sent only once. Never released.
_shutdown_lock: threading.Lock
def __init__(self, *, name: Optional[str] = None):
self._name = name if name is not None else (__package__ or __name__)
self._logger = logging.getLogger(self.name)
self._services = dict()
self._services_lock = threading.Lock()
self._pending_commands = AtomicCounter()
self._tasks = set()
self._shutdown_lock = threading.Lock()
# --- Start the manager thread and wait for it to be ready. ---
self._setup_event = threading.Event()
async def start_manager() -> None:
self._event_loop = asyncio.get_running_loop()
self._command_queue = asyncio.Queue()
self._setup_event.set()
await self._monitor_command_queue()
self._manager_thread = threading.Thread(
name=self.name,
target=lambda: asyncio.run(start_manager()),
daemon=True,
)
self._manager_thread.start()
atexit.register(partial(self.shutdown, wait=True))
self._setup_event.wait()
@property
def name(self) -> str:
"""The name of this service manager. Primarily intended for logging purposes."""
return self._name
@property
def logger(self) -> logging.Logger:
"""The logger used by this service manager."""
return self._logger
@classmethod
def current(cls) -> ServiceManager:
"""The service manager set in the current context (async task or thread).
A global default service manager will be automatically created on first access."""
global default_service_manager
current = current_service_manager.get(None)
if current is None:
with default_service_manager_lock:
if default_service_manager is None:
default_service_manager = cls()
current = default_service_manager
current_service_manager.set(current)
return current
def make_current(self) -> None:
"""Make this the current service manager."""
current_service_manager.set(self)
def run_task(
self,
coro: Coroutine,
*,
wait: bool = False,
wait_timeout: Optional[float] = None,
lock: Optional[asyncio.Lock] = None,
) -> None:
"""Run an async task in the service manager thread.
:param wait: If set, this function will block until the task is complete.
:param wait_timeout: If set, this function will not return until the task is complete or the
specified timeout has elapsed.
"""
if not isinstance(coro, Coroutine):
raise TypeError(f"Cannot schedule task for object of type {type(coro)}")
cmd = Command(coro=coro, lock=lock)
if wait or wait_timeout is not None:
cmd.done = threading.Event()
self._send_command(cmd)
if cmd.done is not None:
cmd.done.wait(timeout=wait_timeout)
def shutdown(
self, *, wait: bool = False, wait_timeout: Optional[float] = None
) -> None:
"""Shutdown the service manager thread.
After the shutdown process completes, any subsequent calls to the service manager will
produce an error.
:param wait: If set, this function will block until the shutdown process is complete.
:param wait_timeout: If set, this function will not return until the shutdown process is
complete or the specified timeout has elapsed.
"""
if self._shutdown_lock.acquire(blocking=False):
self._send_command(None)
if wait:
self._manager_thread.join(timeout=wait_timeout)
def _ensure_running(self) -> None:
self._setup_event.wait()
if not self._manager_thread.is_alive():
raise RuntimeError(f"ServiceManager {self.name} is not running")
def _send_command(self, command: Union[Command, None]) -> None:
self._ensure_running()
async def queue_command() -> None:
await self._command_queue.put(command)
self._pending_commands.sub()
self._pending_commands.add()
asyncio.run_coroutine_threadsafe(queue_command(), self._event_loop)
def _register(self, service: Service) -> None:
"""Register a service with the service manager. This is done by the service constructor."""
self._ensure_running()
with self._services_lock:
name_conflict: Optional[Service] = self._services.get(service.name)
if name_conflict is service:
raise RuntimeError(f"Attempt to re-register service: {service.name}")
elif name_conflict is not None:
raise RuntimeError(f"Duplicate service name: {service.name}")
self.logger.debug(f"Registering service: {service.name}")
self._services[service.name] = service
def _run_command(self, command: Command) -> None:
"""Execute a command and add it to the tasks set."""
def task_done(task: asyncio.Task) -> None:
exc = task.exception()
if exc:
self.logger.exception("Exception in service manager task", exc_info=exc)
self._tasks.discard(task)
if command.done is not None:
command.done.set()
async def task_harness() -> None:
if command.lock is not None:
async with command.lock:
await command.coro
else:
await command.coro
task = asyncio.create_task(task_harness())
task.add_done_callback(task_done)
self._tasks.add(task)
async def _monitor_command_queue(self) -> None:
"""The main function of the background thread."""
self.logger.info("Started service manager")
# Main command processing loop.
while (command := await self._command_queue.get()) is not None:
self._run_command(command)
# Send a stop command to all services. We don't have a status command yet, so we can just
# stop everything and be done with it.
with self._services_lock:
self.logger.debug(f"Stopping {len(self._services)} services")
for service in self._services.values():
service.stop()
# Wait for all commands to finish executing.
await self._shutdown()
self.logger.info("Exiting service manager")
async def _shutdown(self) -> None:
"""Ensure all commands have been queued & executed."""
while True:
command = None
try:
# Try to get a command from the queue.
command = self._command_queue.get_nowait()
except asyncio.QueueEmpty:
if self._pending_commands.value > 0:
# If there are pending commands to queue, await them.
command = await self._command_queue.get()
elif self._tasks:
# If there are still pending tasks, wait for them. These tasks might queue
# commands though, so we have to loop again.
await asyncio.wait(self._tasks)
else:
# Nothing is pending at this point, so we're done here.
break
# If we got a command, run it.
if command is not None:
self._run_command(command)
class AtomicCounter:
"""A lock-protected atomic counter."""
# Modern CPUs have atomics, but Python doesn't seem to include them in the standard library.
# Besides, the performance penalty is negligible compared to, well, using Python.
# So this will do just fine.
def __init__(self, initial: int = 0):
self._lock = threading.Lock()
self._value = initial
def add(self, value: int = 1) -> None:
with self._lock:
self._value += value
def sub(self, value: int = 1) -> None:
with self._lock:
self._value -= value
@property
def value(self) -> int:
with self._lock:
return self._value
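For orientation, a minimal sketch of how a Service subclass might plug into this manager. The Service base class itself is not shown in this hunk, so the registration-on-construction behavior and the start()/stop() wrappers assumed below (mirroring the restart() wrapper above) are assumptions, and HeartbeatService is purely hypothetical:

import asyncio
from typing import Optional

class HeartbeatService(Service):  # hypothetical subclass, for illustration only
    async def on_start(self) -> None:
        # Runs on the manager's background event loop.
        self._beat_task = asyncio.create_task(self._beat())

    async def on_stop(self, *, force: bool = False, timeout: Optional[float] = None) -> None:
        self._beat_task.cancel()

    async def _beat(self) -> None:
        while True:
            await asyncio.sleep(1.0)

svc = HeartbeatService()          # assumed: constructor registers with ServiceManager.current()
svc.start(wait=True)              # assumed wrapper: queues a Command on the manager thread
svc.restart(stop_timeout=5.0)     # on_stop(...) then on_start(), serialized by the service lock
ServiceManager.current().shutdown(wait=True)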

View File

@@ -197,8 +197,8 @@ async def set_gpu_stats(
# intel QSV GPU
intel_usage = get_intel_gpu_stats()
if intel_usage is not None:
stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""}
if intel_usage:
stats["intel-qsv"] = intel_usage
else:
stats["intel-qsv"] = {"gpu": "", "mem": ""}
hwaccel_errors.append(args)
@@ -222,8 +222,8 @@ async def set_gpu_stats(
# intel VAAPI GPU
intel_usage = get_intel_gpu_stats()
if intel_usage is not None:
stats["intel-vaapi"] = intel_usage or {"gpu": "", "mem": ""}
if intel_usage:
stats["intel-vaapi"] = intel_usage
else:
stats["intel-vaapi"] = {"gpu": "", "mem": ""}
hwaccel_errors.append(args)
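Both hunks trade the `or` shorthand for an explicit branch. Assuming the `hwaccel_errors.append(args)` shown above now lives in the new `else`, the practical difference is that a probe which runs but returns nothing is finally recorded as an error; a standalone sketch:

def old_style(intel_usage):
    stats, errors = {}, []
    if intel_usage is not None:
        stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""}
    else:
        errors.append("intel-qsv")
    return stats, errors

def new_style(intel_usage):
    stats, errors = {}, []
    if intel_usage is not None:
        if intel_usage:
            stats["intel-qsv"] = intel_usage
        else:
            stats["intel-qsv"] = {"gpu": "", "mem": ""}
            errors.append("intel-qsv")  # empty stats dict is now also flagged
    else:
        errors.append("intel-qsv")
    return stats, errors

# Identical stats payloads for every input, but only the new form records the
# empty-probe case: new_style({}) == ({"intel-qsv": {"gpu": "", "mem": ""}}, ["intel-qsv"])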

View File

@@ -1,162 +0,0 @@
import datetime
import logging
import os
import unittest
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig
from frigate.models import Event, ReviewSegment
from frigate.review.maintainer import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
class BaseTestHttp(unittest.TestCase):
def setUp(self, models):
# setup clean database for each test run
migrate_db = SqliteExtDatabase("test.db")
del logging.getLogger("peewee_migrate").handlers[:]
router = Router(migrate_db)
router.run()
migrate_db.close()
self.db = SqliteQueueDatabase(TEST_DB)
self.db.bind(models)
self.minimal_config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"front_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
self.test_stats = {
"detection_fps": 13.7,
"detectors": {
"cpu1": {
"detection_start": 0.0,
"inference_speed": 91.43,
"pid": 42,
},
"cpu2": {
"detection_start": 0.0,
"inference_speed": 84.99,
"pid": 44,
},
},
"front_door": {
"camera_fps": 0.0,
"capture_pid": 53,
"detection_fps": 0.0,
"pid": 52,
"process_fps": 0.0,
"skipped_fps": 0.0,
},
"service": {
"storage": {
"/dev/shm": {
"free": 50.5,
"mount_type": "tmpfs",
"total": 67.1,
"used": 16.6,
},
"/media/frigate/clips": {
"free": 42429.9,
"mount_type": "ext4",
"total": 244529.7,
"used": 189607.0,
},
"/media/frigate/recordings": {
"free": 0.2,
"mount_type": "ext4",
"total": 8.0,
"used": 7.8,
},
"/tmp/cache": {
"free": 976.8,
"mount_type": "tmpfs",
"total": 1000.0,
"used": 23.2,
},
},
"uptime": 101113,
"version": "0.10.1",
"latest_version": "0.11",
},
}
def tearDown(self):
if not self.db.is_closed():
self.db.close()
try:
for file in TEST_DB_CLEANUPS:
os.remove(file)
except OSError:
pass
def create_app(self, stats=None):
return create_fastapi_app(
FrigateConfig(**self.minimal_config),
self.db,
None,
None,
None,
None,
None,
stats,
None,
)
def insert_mock_event(
self,
id: str,
start_time: float = datetime.datetime.now().timestamp(),
) -> Event:
"""Inserts a basic event model with a given id."""
return Event.insert(
id=id,
label="Mock",
camera="front_door",
start_time=start_time,
end_time=start_time + 20,
top_score=100,
false_positive=False,
zones=list(),
thumbnail="",
region=[],
box=[],
area=0,
has_clip=True,
has_snapshot=True,
).execute()
def insert_mock_review_segment(
self,
id: str,
start_time: float = datetime.datetime.now().timestamp(),
end_time: float = datetime.datetime.now().timestamp() + 20,
) -> ReviewSegment:
"""Inserts a basic review segment model with a given id."""
return ReviewSegment.insert(
id=id,
camera="front_door",
start_time=start_time,
end_time=end_time,
has_been_reviewed=False,
severity=SeverityEnum.alert,
thumb_path=False,
data={},
).execute()

View File

@@ -1,110 +0,0 @@
import datetime
from fastapi.testclient import TestClient
from frigate.models import Event, ReviewSegment
from frigate.test.http_api.base_http_test import BaseTestHttp
class TestHttpReview(BaseTestHttp):
def setUp(self):
super().setUp([Event, ReviewSegment])
# Does not return any data points, since no end time (the `before` parameter) is passed and the review segment's end_time is 2 seconds in the future
def test_get_review_no_filters_no_matches(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
super().insert_mock_review_segment("123456.random", now, now + 2)
reviews_response = client.get("/review")
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 0
def test_get_review_no_filters(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
super().insert_mock_review_segment("123456.random", now - 2, now - 1)
reviews_response = client.get("/review")
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 1
def test_get_review_with_time_filter_no_matches(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
id = "123456.random"
super().insert_mock_review_segment(id, now, now + 2)
params = {
"after": now,
"before": now + 3,
}
reviews_response = client.get("/review", params=params)
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 0
def test_get_review_with_time_filter(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
id = "123456.random"
super().insert_mock_review_segment(id, now, now + 2)
params = {
"after": now - 1,
"before": now + 3,
}
reviews_response = client.get("/review", params=params)
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 1
assert reviews_in_response[0]["id"] == id
def test_get_review_with_limit_filter(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
id = "123456.random"
id2 = "654321.random"
super().insert_mock_review_segment(id, now, now + 2)
super().insert_mock_review_segment(id2, now + 1, now + 2)
params = {
"limit": 1,
"after": now,
"before": now + 3,
}
reviews_response = client.get("/review", params=params)
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 1
assert reviews_in_response[0]["id"] == id2
def test_get_review_with_all_filters(self):
app = super().create_app()
now = datetime.datetime.now().timestamp()
with TestClient(app) as client:
id = "123456.random"
super().insert_mock_review_segment(id, now, now + 2)
params = {
"cameras": "front_door",
"labels": "all",
"zones": "all",
"reviewed": 0,
"limit": 1,
"severity": "alert",
"after": now - 1,
"before": now + 3,
}
reviews_response = client.get("/review", params=params)
assert reviews_response.status_code == 200
reviews_in_response = reviews_response.json()
assert len(reviews_in_response) == 1
assert reviews_in_response[0]["id"] == id

View File

@@ -9,7 +9,5 @@ class ObjectTracker(ABC):
pass
@abstractmethod
def match_and_update(
self, frame_name: str, frame_time: float, detections: list[dict[str, any]]
) -> None:
def match_and_update(self, frame_time: float, detections) -> None:
pass
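To make the new contract concrete: a toy, standalone tracker implementing the simplified signature. This is not the real ABC (which defines further abstract methods elided from this hunk), and the matching strategy here is deliberately naive:

class ToyTracker:
    """Hypothetical tracker illustrating the two-argument match_and_update."""

    def __init__(self) -> None:
        self.tracked_objects: dict[str, dict] = {}

    def match_and_update(self, frame_time: float, detections: list[dict]) -> None:
        # Naive matching: key each detection by its label. Real implementations
        # match spatially (CentroidTracker) or via motion-compensated tracking
        # (NorfairTracker, shown below).
        for det in detections:
            det["frame_time"] = frame_time
            self.tracked_objects.setdefault(det["label"], {}).update(det)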

View File

@@ -129,7 +129,7 @@ class CentroidTracker(ObjectTracker):
self.tracked_objects[id].update(new_obj)
def update_frame_times(self, frame_name, frame_time):
def update_frame_times(self, frame_time):
for id in list(self.tracked_objects.keys()):
self.tracked_objects[id]["frame_time"] = frame_time
self.tracked_objects[id]["motionless_count"] += 1

View File

@@ -268,7 +268,7 @@ class NorfairTracker(ObjectTracker):
self.tracked_objects[id].update(obj)
def update_frame_times(self, frame_name: str, frame_time: float):
def update_frame_times(self, frame_time):
# if the object was there in the last frame, assume it's still there
detections = [
(
@@ -282,11 +282,9 @@ class NorfairTracker(ObjectTracker):
for id, obj in self.tracked_objects.items()
if self.disappeared[id] == 0
]
self.match_and_update(frame_name, frame_time, detections=detections)
self.match_and_update(frame_time, detections=detections)
def match_and_update(
self, frame_name: str, frame_time: float, detections: list[dict[str, any]]
):
def match_and_update(self, frame_time, detections):
norfair_detections = []
for obj in detections:
@@ -324,7 +322,7 @@ class NorfairTracker(ObjectTracker):
)
coord_transformations = self.ptz_motion_estimator.motion_estimator(
detections, frame_name, frame_time, self.camera_name
detections, frame_time, self.camera_name
)
tracked_objects = self.tracker.update(

View File

@@ -4,7 +4,6 @@ import base64
import logging
from collections import defaultdict
from statistics import median
from typing import Optional
import cv2
import numpy as np
@@ -424,11 +423,10 @@ class TrackedObjectAttribute:
"box": self.box,
}
def find_best_object(self, objects: list[dict[str, any]]) -> Optional[str]:
def find_best_object(self, objects: list[dict[str, any]]) -> str:
"""Find the best attribute for each object and return its ID."""
best_object_area = None
best_object_id = None
best_object_label = None
for obj in objects:
if not box_inside(obj["box"], self.box):
@@ -442,15 +440,8 @@ class TrackedObjectAttribute:
if best_object_area is None:
best_object_area = object_area
best_object_id = obj["id"]
best_object_label = obj["label"]
else:
if best_object_label == "car" and obj["label"] == "car":
# if multiple cars overlap the attribute, the assignment is ambiguous, so don't assign the label
return None
elif object_area < best_object_area:
# if a car and a person overlap, assign the label to the smaller object (which should be the person)
best_object_area = object_area
best_object_id = obj["id"]
best_object_label = obj["label"]
elif object_area < best_object_area:
best_object_area = object_area
best_object_id = obj["id"]
return best_object_id
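To see the simplified tie-breaking in action, a self-contained sketch with made-up boxes; `box_inside` is re-implemented minimally here, whereas the method above uses the helper from the surrounding module:

def box_inside(outer, inner):
    # (x1, y1, x2, y2) containment check
    return (
        inner[0] >= outer[0] and inner[1] >= outer[1]
        and inner[2] <= outer[2] and inner[3] <= outer[3]
    )

attribute_box = (10, 10, 20, 20)  # e.g. a detected face or license plate
objects = [
    {"id": "car-1", "label": "car", "box": (0, 0, 100, 100)},      # area 10000
    {"id": "person-1", "label": "person", "box": (5, 5, 40, 60)},  # area 1925
]

best_id, best_area = None, None
for obj in objects:
    if not box_inside(obj["box"], attribute_box):
        continue
    area = (obj["box"][2] - obj["box"][0]) * (obj["box"][3] - obj["box"][1])
    if best_area is None or area < best_area:
        best_id, best_area = obj["id"], area

assert best_id == "person-1"  # the smaller containing object wins the attribute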

View File

@@ -19,7 +19,3 @@ class ModelStatusTypesEnum(str, Enum):
downloading = "downloading"
downloaded = "downloaded"
error = "error"
class TrackedObjectUpdateTypesEnum(str, Enum):
description = "description"
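Because these are str Enums, members compare equal to their string values. A small sketch of how such a member could be used in an update payload (the payload shape is an assumption, not taken from this diff):

update = {
    "type": TrackedObjectUpdateTypesEnum.description,
    "id": "123456.random",                            # hypothetical event id
    "description": "A person walks up the driveway",
}
assert update["type"] == "description"  # str Enum member equals its value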

View File

@@ -13,12 +13,12 @@ import urllib.parse
from collections.abc import Mapping
from pathlib import Path
from typing import Any, Optional, Tuple, Union
from zoneinfo import ZoneInfoNotFoundError
import numpy as np
import pytz
from ruamel.yaml import YAML
from tzlocal import get_localzone
from zoneinfo import ZoneInfoNotFoundError
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
@@ -282,17 +282,6 @@ def get_tomorrow_at_time(hour: int) -> datetime.datetime:
)
def is_current_hour(timestamp: int) -> bool:
"""Returns if timestamp is in the current UTC hour."""
start_of_next_hour = (
datetime.datetime.now(datetime.timezone.utc).replace(
minute=0, second=0, microsecond=0
)
+ datetime.timedelta(hours=1)
).timestamp()
return timestamp < start_of_next_hour
def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
"""clear file then unlink to avoid space retained by file descriptors."""
if not missing_ok and not file.exists():
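A quick worked example of the removed is_current_hour helper's semantics. Note the check only bounds the timestamp from above, so any past timestamp also reports as being in the current hour (possibly why it was dropped; an assumption, not stated in this diff):

import datetime

now = datetime.datetime(2024, 10, 21, 9, 36, tzinfo=datetime.timezone.utc)
start_of_next_hour = (
    now.replace(minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)
).timestamp()  # 10:00:00 UTC

assert now.timestamp() < start_of_next_hour                                  # current hour
assert (now - datetime.timedelta(days=1)).timestamp() < start_of_next_hour   # yesterday also passes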

Some files were not shown because too many files have changed in this diff.