Compare commits

..

3 Commits

Author          SHA1          Message                                         Date
Josh Hawkins    b0c4c77cfd    update docs                                     2024-10-21 09:36:16 -05:00
Josh Hawkins    059475e6bb    add try/except around ollama initialization    2024-10-21 09:31:34 -05:00
Josh Hawkins    8002e59031    disable mem arena in options for cpu only      2024-10-21 09:31:11 -05:00
232 changed files with 5558 additions and 12595 deletions

View File

@@ -12,7 +12,6 @@ argmax
 argmin
 argpartition
 ascontiguousarray
-astype
 authelia
 authentik
 autodetected
@@ -43,7 +42,6 @@ codeproject
 colormap
 colorspace
 comms
-coro
 ctypeslib
 CUDA
 Cuvid
@@ -61,7 +59,6 @@ dsize
 dtype
 ECONNRESET
 edgetpu
-fastapi
 faststart
 fflags
 ffprobe
@@ -196,7 +193,6 @@ poweroff
 preexec
 probesize
 protobuf
-pstate
 psutil
 pubkey
 putenv
@@ -241,7 +237,6 @@ sleeptime
 SNDMORE
 socs
 sqliteq
-sqlitevecq
 ssdlite
 statm
 stimeout
@@ -276,11 +271,9 @@ unraid
 unreviewed
 userdata
 usermod
-uvicorn
 vaapi
 vainfo
 variations
-vbios
 vconcat
 vitb
 vstream

View File

@@ -3,12 +3,10 @@
 set -euxo pipefail
 
 # Cleanup the old github host key
-if [[ -f ~/.ssh/known_hosts ]]; then
-    sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
-    # Add new github host key
-    curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
-        sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
-fi
+sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
+# Add new github host key
+curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
+    sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
 
 # Frigate normal container runs as root, so it have permission to create
 # the folders. But the devcontainer runs as the host user, so we need to

View File

@@ -13,7 +13,6 @@
 - [ ] New feature
 - [ ] Breaking change (fix/feature causing existing functionality to break)
 - [ ] Code quality improvements to existing code
-- [ ] Documentation Update
 
 ## Additional information

View File

@@ -6,8 +6,6 @@ on:
     branches:
       - dev
       - master
-    paths-ignore:
-      - "docs/**"
 
 # only run the latest commit to avoid cache overwrites
 concurrency:
@@ -24,8 +22,6 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
@@ -47,8 +43,6 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
@@ -66,7 +60,7 @@ jobs:
             ${{ steps.setup.outputs.image-name }}-standard-arm64
          cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
      - name: Build and push RPi build
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v4
        with:
          push: true
          targets: rpi
@@ -75,14 +69,21 @@ jobs:
             rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
+      - name: Build and push Rockchip build
+        uses: docker/bake-action@v3
+        with:
+          push: true
+          targets: rk
+          files: docker/rockchip/rk.hcl
+          set: |
+            rk.tags=${{ steps.setup.outputs.image-name }}-rk
+            *.cache-from=type=gha
 
   jetson_jp4_build:
     runs-on: ubuntu-latest
     name: Jetson Jetpack 4
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
@@ -94,7 +95,7 @@ jobs:
           BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
          SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
          TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v4
        with:
          push: true
          targets: tensorrt
@@ -109,8 +110,6 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
@@ -122,7 +121,7 @@ jobs:
           BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
          SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
          TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v4
        with:
          push: true
          targets: tensorrt
@@ -139,8 +138,6 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
@@ -149,7 +146,7 @@ jobs:
       - name: Build and push TensorRT (x86 GPU)
        env:
          COMPUTE_LEVEL: "50 60 70 80 90"
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v4
        with:
          push: true
          targets: tensorrt
@@ -166,15 +163,13 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Rockchip build
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v3
        with:
          push: true
          targets: rk
@@ -191,15 +186,13 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Hailo-8l build
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v4
        with:
          push: true
          targets: h8l
@@ -212,7 +205,7 @@ jobs:
         env:
          AMDGPU: gfx
          HSA_OVERRIDE: 0
-        uses: docker/bake-action@v6
+        uses: docker/bake-action@v3
        with:
          push: true
          targets: rocm

View File

@@ -0,0 +1,24 @@
+name: dependabot-auto-merge
+on: pull_request
+
+permissions:
+  contents: write
+
+jobs:
+  dependabot-auto-merge:
+    runs-on: ubuntu-latest
+    if: github.actor == 'dependabot[bot]'
+    steps:
+      - name: Get Dependabot metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@v2
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Enable auto-merge for Dependabot PRs
+        if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
+        run: |
+          gh pr review --approve "$PR_URL"
+          gh pr merge --auto --squash "$PR_URL"
+        env:
+          PR_URL: ${{ github.event.pull_request.html_url }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
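Read as a predicate, the `if:` condition on the auto-merge step only lets development dependencies with semver-minor or semver-patch bumps through. A minimal Python restatement of that gate (illustrative only, the function name is ours, not part of the workflow):

```python
# Sketch of the workflow's auto-merge gate, using the same
# dependabot/fetch-metadata output values the if: condition checks.
def should_auto_merge(dependency_type: str, update_type: str) -> bool:
    return dependency_type == "direct:development" and update_type in (
        "version-update:semver-minor",
        "version-update:semver-patch",
    )


assert should_auto_merge("direct:development", "version-update:semver-patch")
assert not should_auto_merge("direct:production", "version-update:semver-patch")   # prod deps: manual review
assert not should_auto_merge("direct:development", "version-update:semver-major")  # majors: manual review
```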

View File

@@ -1,9 +1,6 @@
 name: On pull request
-on:
-  pull_request:
-    paths-ignore:
-      - "docs/**"
+on: pull_request
 
 env:
   DEFAULT_PYTHON: 3.9
@@ -19,8 +16,6 @@ jobs:
       DOCKER_BUILDKIT: "1"
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x
@@ -40,8 +35,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x
@@ -56,8 +49,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 20.x
@@ -73,10 +64,8 @@ jobs:
     steps:
       - name: Check out the repository
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.3.0
+        uses: actions/setup-python@v5.1.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Install requirements
@@ -96,8 +85,6 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x

View File

@@ -11,8 +11,6 @@ jobs:
     steps:
       - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
      - id: lowercaseRepo
        uses: ASzc/change-string-case-action@v6
        with:
@@ -24,13 +22,10 @@ jobs:
           username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create tag variables
-        env:
-          TAG: ${{ github.ref_name }}
-          LOWERCASE_REPO: ${{ steps.lowercaseRepo.outputs.lowercase }}
        run: |
-          BUILD_TYPE=$([[ "${TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
+          BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
          echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV
-          echo "BASE=ghcr.io/${LOWERCASE_REPO}" >> $GITHUB_ENV
+          echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
          echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV
          echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
      - name: Tag and push the main image
@@ -39,14 +34,14 @@ jobs:
           STABLE_TAG=${BASE}:stable
          PULL_TAG=${BASE}:${BUILD_TAG}
          docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
-          for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
+          for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
            docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
          done
          # stable tag
          if [[ "${BUILD_TYPE}" == "stable" ]]; then
            docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
-            for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
+            for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
              docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
            done
          fi
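Two things change in this file: the removed side routes `github.ref_name` and the lowercased repo name through `env:` rather than interpolating `${{ }}` directly into the shell (a common hardening against script injection from untrusted ref names), and both sides classify the tag with the same regex. A small Python sketch of that stable/beta classification (hypothetical helper, for illustration):

```python
import re


# Mirrors the BUILD_TYPE line: a bare vMAJOR.MINOR.PATCH tag is a stable
# release; anything else (e.g. a beta or rc suffix) is treated as beta.
def build_type(tag: str) -> str:
    return "stable" if re.fullmatch(r"v\d+\.\d+\.\d+", tag) else "beta"


print(build_type("v0.14.1"))        # -> stable
print(build_type("v0.15.0-beta2"))  # -> beta
```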

View File

@@ -23,9 +23,7 @@ jobs:
           exempt-pr-labels: "pinned,security,dependencies"
          operations-per-run: 120
      - name: Print outputs
-        env:
-          STALE_OUTPUT: ${{ join(steps.stale.outputs.*, ',') }}
-        run: echo "$STALE_OUTPUT"
+        run: echo ${{ join(steps.stale.outputs.*, ',') }}
 
   # clean_ghcr:
   #   name: Delete outdated dev container images
@@ -40,3 +38,4 @@ jobs:
   #       account-type: personal
   #       token: ${{ secrets.GITHUB_TOKEN }}
   #       token-type: github-token
+

View File

@@ -23,7 +23,7 @@ services:
     #           count: 1
    #           capabilities: [gpu]
    environment:
-      YOLO_MODELS: ""
+      YOLO_MODELS: yolov7-320
    devices:
      - /dev/bus/usb:/dev/bus/usb
      # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware

View File

@@ -16,25 +16,89 @@ RUN mkdir /h8l-wheels
 # Build the wheels
 RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt
 
-FROM wget AS hailort
+# Build HailoRT and create wheel
+FROM wheels AS build-hailort
 ARG TARGETARCH
-RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
-    /deps/install_hailort.sh
+
+SHELL ["/bin/bash", "-c"]
+
+# Install necessary APT packages
+RUN apt-get -qq update \
+    && apt-get -qq install -y \
+    apt-transport-https \
+    gnupg \
+    wget \
+    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
+    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
+    gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
+    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
+    tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
+    && apt-get -qq update \
+    && apt-get -qq install -y \
+    python3.9 \
+    python3.9-dev \
+    build-essential cmake git \
+    && rm -rf /var/lib/apt/lists/*
+
+# Extract Python version and set environment variables
+RUN PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2) && \
+    PYTHON_VERSION_NO_DOT=$(echo $PYTHON_VERSION | sed 's/\.//') && \
+    echo "PYTHON_VERSION=$PYTHON_VERSION" > /etc/environment && \
+    echo "PYTHON_VERSION_NO_DOT=$PYTHON_VERSION_NO_DOT" >> /etc/environment
+
+# Clone and build HailoRT
+RUN . /etc/environment && \
+    git clone https://github.com/hailo-ai/hailort.git /opt/hailort && \
+    cd /opt/hailort && \
+    git checkout v4.18.0 && \
+    cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_PYBIND=1 -DPYBIND11_PYTHON_VERSION=${PYTHON_VERSION} && \
+    cmake --build build --config release --target libhailort && \
+    cmake --build build --config release --target _pyhailort && \
+    cp build/hailort/libhailort/bindings/python/src/_pyhailort.cpython-${PYTHON_VERSION_NO_DOT}-$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ && \
+    cp build/hailort/libhailort/src/libhailort.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
+
+RUN ls -ahl /opt/hailort/build/hailort/libhailort/src/
+RUN ls -ahl /opt/hailort/hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
+
+# Remove the existing setup.py if it exists in the target directory
+RUN rm -f /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
+
+# Copy generate_wheel_conf.py and setup.py
+COPY docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
+COPY docker/hailo8l/pyhailort_build_scripts/setup.py /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
+
+# Run the generate_wheel_conf.py script
+RUN python3 /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
+
+# Create a wheel file using pip3 wheel
+RUN cd /opt/hailort/hailort/libhailort/bindings/python/platform && \
+    python3 setup.py bdist_wheel --dist-dir /hailo-wheels
 
 # Use deps as the base image
 FROM deps AS h8l-frigate
 
 # Copy the wheels from the wheels stage
 COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
-COPY --from=hailort /hailo-wheels /deps/hailo-wheels
-COPY --from=hailort /rootfs/ /
+COPY --from=build-hailort /hailo-wheels /deps/hailo-wheels
+COPY --from=build-hailort /etc/environment /etc/environment
+RUN CC=$(python3 -c "import sysconfig; import shlex; cc = sysconfig.get_config_var('CC'); cc_cmd = shlex.split(cc)[0]; print(cc_cmd[:-4] if cc_cmd.endswith('-gcc') else cc_cmd)") && \
+    echo "CC=$CC" >> /etc/environment
 
 # Install the wheels
 RUN pip3 install -U /deps/h8l-wheels/*.whl
 RUN pip3 install -U /deps/hailo-wheels/*.whl
+RUN . /etc/environment && \
+    mv /usr/local/lib/python${PYTHON_VERSION}/dist-packages/hailo_platform/pyhailort/libhailort.so /usr/lib/${CC} && \
+    cd /usr/lib/${CC}/ && \
+    ln -s libhailort.so libhailort.so.4.18.0
 
 # Copy base files from the rootfs stage
 COPY --from=rootfs / /
+
+# Set environment variables for Hailo SDK
+ENV PATH="/opt/hailort/bin:${PATH}"
+ENV LD_LIBRARY_PATH="/usr/lib/$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu:${LD_LIBRARY_PATH}"
 
 # Set workdir
 WORKDIR /opt/frigate/
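The `RUN CC=$(python3 -c ...)` step added above packs several calls into one line. Expanded into plain Python for readability (same logic; it assumes it runs inside the image, where Python's build-time `CC` is the triplet-prefixed toolchain):

```python
import shlex
import sysconfig

# The compiler Python itself was built with, e.g. "x86_64-linux-gnu-gcc -pthread"
cc = sysconfig.get_config_var("CC")

# First token is the compiler binary; the rest are flags.
cc_cmd = shlex.split(cc)[0]

# Strip a trailing "-gcc" to recover the target triplet ("x86_64-linux-gnu"),
# which the Dockerfile then uses as the /usr/lib/<triplet> directory name.
triplet = cc_cmd[:-4] if cc_cmd.endswith("-gcc") else cc_cmd
print(triplet)
```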

View File

@@ -1,9 +1,3 @@
-target wget {
-  dockerfile = "docker/main/Dockerfile"
-  platforms = ["linux/arm64","linux/amd64"]
-  target = "wget"
-}
-
 target wheels {
   dockerfile = "docker/main/Dockerfile"
   platforms = ["linux/arm64","linux/amd64"]
@@ -25,7 +19,6 @@ target rootfs {
 target h8l {
   dockerfile = "docker/hailo8l/Dockerfile"
   contexts = {
-    wget = "target:wget"
     wheels = "target:wheels"
     deps = "target:deps"
     rootfs = "target:rootfs"

View File

@@ -1,19 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-hailo_version="4.19.0"
-
-if [[ "${TARGETARCH}" == "amd64" ]]; then
-    arch="x86_64"
-elif [[ "${TARGETARCH}" == "arm64" ]]; then
-    arch="aarch64"
-fi
-
-wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
-    tar -C / -xzf -
-
-mkdir -p /hailo-wheels
-
-wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"

View File

@@ -0,0 +1,67 @@
import json
import os
import platform
import sys
import sysconfig
def extract_toolchain_info(compiler):
# Remove the "-gcc" or "-g++" suffix if present
if compiler.endswith("-gcc") or compiler.endswith("-g++"):
compiler = compiler.rsplit("-", 1)[0]
# Extract the toolchain and ABI part (e.g., "gnu")
toolchain_parts = compiler.split("-")
abi_conventions = next(
(part for part in toolchain_parts if part in ["gnu", "musl", "eabi", "uclibc"]),
"",
)
return abi_conventions
def generate_wheel_conf():
conf_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
)
# Extract current system and Python version information
py_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
arch = platform.machine()
system = platform.system().lower()
libc_version = platform.libc_ver()[1]
# Get the compiler information
compiler = sysconfig.get_config_var("CC")
abi_conventions = extract_toolchain_info(compiler)
# Create the new configuration data
new_conf_data = {
"py_version": py_version,
"arch": arch,
"system": system,
"libc_version": libc_version,
"abi": abi_conventions,
"extension": {
"posix": "so",
"nt": "pyd", # Windows
}[os.name],
}
# If the file exists, load the existing data
if os.path.isfile(conf_file_path):
with open(conf_file_path, "r") as conf_file:
conf_data = json.load(conf_file)
# Update the existing data with the new data
conf_data.update(new_conf_data)
else:
# If the file does not exist, use the new data
conf_data = new_conf_data
# Write the updated data to the file
with open(conf_file_path, "w") as conf_file:
json.dump(conf_data, conf_file, indent=4)
if __name__ == "__main__":
generate_wheel_conf()
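For reference, `setup.py` (the next file) consumes the `wheel_conf.json` this script writes and reassembles the `_pyhailort` extension filename from it. A sketch with illustrative values for an amd64 glibc builder:

```python
# Illustrative conf values; the real file is produced by generate_wheel_conf.py.
conf = {
    "py_version": "cp39",
    "arch": "x86_64",
    "system": "linux",
    "abi": "gnu",
    "extension": "so",
}

# Same string construction as _get_pyhailort_lib_path() in setup.py.
lib_filename = (
    f"_pyhailort.cpython-{conf['py_version'].split('cp')[1]}"
    f"-{conf['arch']}-{conf['system']}-{conf['abi']}.{conf['extension']}"
)
print(lib_filename)  # -> _pyhailort.cpython-39-x86_64-linux-gnu.so
```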

View File

@@ -0,0 +1,111 @@
+import json
+import os
+
+from setuptools import find_packages, setup
+from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
+
+
+class NonPurePythonBDistWheel(orig_bdist_wheel):
+    """Makes the wheel platform-dependent so it can be based on the _pyhailort architecture"""
+
+    def finalize_options(self):
+        orig_bdist_wheel.finalize_options(self)
+        self.root_is_pure = False
+
+
+def _get_hailort_lib_path():
+    lib_filename = "libhailort.so"
+    lib_path = os.path.join(
+        os.path.abspath(os.path.dirname(__file__)),
+        f"hailo_platform/pyhailort/{lib_filename}",
+    )
+    if os.path.exists(lib_path):
+        print(f"Found libhailort shared library at: {lib_path}")
+    else:
+        print(f"Error: libhailort shared library not found at: {lib_path}")
+        raise FileNotFoundError(f"libhailort shared library not found at: {lib_path}")
+    return lib_path
+
+
+def _get_pyhailort_lib_path():
+    conf_file_path = os.path.join(
+        os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
+    )
+    if not os.path.isfile(conf_file_path):
+        raise FileNotFoundError(f"Configuration file not found: {conf_file_path}")
+
+    with open(conf_file_path, "r") as conf_file:
+        content = json.load(conf_file)
+        py_version = content["py_version"]
+        arch = content["arch"]
+        system = content["system"]
+        extension = content["extension"]
+        abi = content["abi"]
+
+        # Construct the filename directly
+        lib_filename = f"_pyhailort.cpython-{py_version.split('cp')[1]}-{arch}-{system}-{abi}.{extension}"
+        lib_path = os.path.join(
+            os.path.abspath(os.path.dirname(__file__)),
+            f"hailo_platform/pyhailort/{lib_filename}",
+        )
+
+        if os.path.exists(lib_path):
+            print(f"Found _pyhailort shared library at: {lib_path}")
+        else:
+            print(f"Error: _pyhailort shared library not found at: {lib_path}")
+            raise FileNotFoundError(
+                f"_pyhailort shared library not found at: {lib_path}"
+            )
+
+    return lib_path
+
+
+def _get_package_paths():
+    packages = []
+    pyhailort_lib = _get_pyhailort_lib_path()
+    hailort_lib = _get_hailort_lib_path()
+    if pyhailort_lib:
+        packages.append(pyhailort_lib)
+    if hailort_lib:
+        packages.append(hailort_lib)
+    packages.append(os.path.abspath("hailo_tutorials/notebooks/*"))
+    packages.append(os.path.abspath("hailo_tutorials/hefs/*"))
+    return packages
+
+
+if __name__ == "__main__":
+    setup(
+        author="Hailo team",
+        author_email="contact@hailo.ai",
+        cmdclass={
+            "bdist_wheel": NonPurePythonBDistWheel,
+        },
+        description="HailoRT",
+        entry_points={
+            "console_scripts": [
+                "hailo=hailo_platform.tools.hailocli.main:main",
+            ]
+        },
+        install_requires=[
+            "argcomplete",
+            "contextlib2",
+            "future",
+            "netaddr",
+            "netifaces",
+            "verboselogs",
+            "numpy==1.23.3",
+        ],
+        name="hailort",
+        package_data={
+            "hailo_platform": _get_package_paths(),
+        },
+        packages=find_packages(),
+        platforms=[
+            "linux_x86_64",
+            "linux_aarch64",
+            "win_amd64",
+        ],
+        url="https://hailo.ai/",
+        version="4.17.0",
+        zip_safe=False,
+    )

View File

@@ -1,12 +1,12 @@
-appdirs==1.4.*
-argcomplete==2.0.*
-contextlib2==0.6.*
-distlib==0.3.*
-filelock==3.8.*
-future==0.18.*
-importlib-metadata==5.1.*
-importlib-resources==5.1.*
-netaddr==0.8.*
-netifaces==0.10.*
-verboselogs==1.7.*
-virtualenv==20.17.*
+appdirs==1.4.4
+argcomplete==2.0.0
+contextlib2==0.6.0.post1
+distlib==0.3.6
+filelock==3.8.0
+future==0.18.2
+importlib-metadata==5.1.0
+importlib-resources==5.1.2
+netaddr==0.8.0
+netifaces==0.10.9
+verboselogs==1.7
+virtualenv==20.17.0

View File

@@ -13,7 +13,7 @@ else
 fi
 
 # Clone the HailoRT driver repository
-git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
+git clone --depth 1 --branch v4.18.0 https://github.com/hailo-ai/hailort-drivers.git
 
 # Build and install the HailoRT driver
 cd hailort-drivers/linux/pcie
@@ -38,7 +38,7 @@ cd ../../
 if [ ! -d /lib/firmware/hailo ]; then
     sudo mkdir /lib/firmware/hailo
 fi
-sudo mv hailo8_fw.*.bin /lib/firmware/hailo/hailo8_fw.bin
+sudo mv hailo8_fw.4.17.0.bin /lib/firmware/hailo/hailo8_fw.bin
 
 # Install udev rules
 sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/

View File

@@ -211,9 +211,6 @@ ENV TOKENIZERS_PARALLELISM=true
 # https://github.com/huggingface/transformers/issues/27214
 ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 
-# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
-ENV OPENCV_FFMPEG_LOGLEVEL=8
-
 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
 
 ENV LIBAVFORMAT_VERSION_MAJOR=60

View File

@@ -87,8 +87,8 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \ apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \ intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04 libmfx1 libmfxgen1 libvpl2
rm -f /usr/share/keyrings/intel-graphics.gpg rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list

View File

@@ -1,11 +1,9 @@
@@ -1,11 +1,9 @@
 click == 8.1.*
 # FastAPI
-aiohttp == 3.11.2
-starlette == 0.41.2
 starlette-context == 0.3.6
-fastapi == 0.115.*
+fastapi == 0.115.0
 uvicorn == 0.30.*
-slowapi == 0.1.*
+slowapi == 0.1.9
 imutils == 0.5.*
 joserfc == 1.0.*
 pathvalidate == 3.2.*
@@ -18,10 +16,10 @@ paho-mqtt == 2.1.*
 pandas == 2.2.*
 peewee == 3.17.*
 peewee_migrate == 1.13.*
-psutil == 6.1.*
+psutil == 5.9.*
 pydantic == 2.8.*
 git+https://github.com/fbcotter/py3nvml#egg=py3nvml
-pytz == 2024.*
+pytz == 2024.1
 pyzmq == 26.2.*
 ruamel.yaml == 0.18.*
 tzlocal == 5.2

View File

@@ -165,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False):
birdseye: dict[str, any] = config.get("birdseye") birdseye: dict[str, any] = config.get("birdseye")
input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}" input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}" ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
if go2rtc_config.get("streams"): if go2rtc_config.get("streams"):
go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd

View File

@@ -12,11 +12,26 @@ ARG TARGETARCH
 COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
 
+# Build CuDNN
+FROM wget AS cudnn-deps
+
+ARG COMPUTE_LEVEL
+
+RUN apt-get update \
+    && apt-get install -y git build-essential
+
+RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
+    && dpkg -i cuda-keyring_1.1-1_all.deb \
+    && apt-get update \
+    && apt-get -y install cuda-toolkit \
+    && rm -rf /var/lib/apt/lists/*
+
 FROM tensorrt-base AS frigate-tensorrt
 ENV TRT_VER=8.5.3
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
     ldconfig
+COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
+
 ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
 WORKDIR /opt/frigate/
@@ -27,7 +42,7 @@ FROM devcontainer AS devcontainer-trt
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
+COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
View File

@@ -10,8 +10,8 @@ ARG DEBIAN_FRONTEND
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y --no-install-recommends \
-    python3.9 python3.9-dev \
-    wget build-essential cmake git \
+        python3.9 python3.9-dev \
+        wget build-essential cmake git \
     && rm -rf /var/lib/apt/lists/*
 
 # Ensure python3 defaults to python3.9
@@ -41,11 +41,7 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
 
 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
-
-RUN pip3 uninstall -y onnxruntime-openvino \
-    && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
-    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
+RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
 
 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND

View File

@@ -24,9 +24,8 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
-ENV YOLO_MODELS=""
+ENV YOLO_MODELS="yolov7-320"
 
 HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1

View File

@@ -11,7 +11,6 @@ set -o errexit -o nounset -o pipefail
 MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
 TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
 OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
-YOLO_MODELS=${YOLO_MODELS:-""}
 
 # Create output folder
 mkdir -p ${OUTPUT_FOLDER}
@@ -20,11 +19,6 @@ FIRST_MODEL=true
 MODEL_DOWNLOAD=""
 MODEL_CONVERT=""
 
-if [ -z "$YOLO_MODELS" ]; then
-    echo "tensorrt model preparation disabled"
-    exit 0
-fi
-
 for model in ${YOLO_MODELS//,/ }
 do
     # Remove old link in case path/version changed

View File

@@ -9,6 +9,6 @@ nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64' nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64' onnx==1.14.0; platform_machine == 'x86_64'
onnxruntime-gpu==1.18.*; platform_machine == 'x86_64' onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64'

View File

@@ -174,7 +174,7 @@ NOTE: The folder that is set for the config needs to be the folder that contains
 ### Custom go2rtc version
 
-Frigate currently includes go2rtc v1.9.2, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.4, there may be certain cases where you want to run a different version of go2rtc.
 
 To do this:

View File

@@ -41,7 +41,6 @@ cameras:
     ...
     onvif:
       # Required: host of the camera being connected to.
-      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
       host: 0.0.0.0
       # Optional: ONVIF port for device (default: shown below).
       port: 8000
@@ -50,8 +49,6 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
-      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
-      tls_insecure: False
       # Optional: PTZ camera object autotracking. Keeps a moving object in
       # the center of the frame by automatically moving the PTZ camera.
       autotracking:

View File

@@ -181,7 +181,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk - rtspx://192.168.1.1:7441/abcdefghijk
``` ```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp) [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp)
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.

View File

@@ -109,7 +109,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
 | Reolink E1 Zoom | ✅ | ❌ | |
 | Reolink RLC-823A 16x | ✅ | ❌ | |
 | Speco O8P32X | ✅ | ❌ | |
-| Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on original, and 4k models. All models are suspected incompatable. |
+| Sunba 405-D20X | ✅ | ❌ | |
 | Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
 | Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
 | Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |

View File

@@ -3,15 +3,9 @@ id: genai
 title: Generative AI
 ---
 
-Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
+Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects.
 
-Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
-
-:::info
-
-Semantic Search must be enabled to use Generative AI.
-
-:::
+Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
 
 ## Configuration
@@ -35,21 +29,15 @@ cameras:
 
 ## Ollama
 
-:::warning
-
-Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical.
-
-:::
-
-[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.
+[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. CPU inference is not recommended.
 
-Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
+Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [docker container](https://hub.docker.com/r/ollama/ollama) available.
 
-Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
+Parallel requests also come with some caveats. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
 
 ### Supported Models
 
-You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
+You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first.
 
 :::note
@@ -64,7 +52,7 @@ genai:
   enabled: True
   provider: ollama
   base_url: http://localhost:11434
-  model: llava:7b
+  model: llava
 ```
 
 ## Google Gemini
@@ -144,10 +132,6 @@ Frigate's thumbnail search excels at identifying specific details about tracked
 
 While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context.
 
-### Using GenAI for notifications
-
-Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
-
 ## Custom Prompts
 
 Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
@@ -178,7 +162,7 @@ genai:
 
 Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
 
-Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
+Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction.
 
 ```yaml
 cameras:

View File

@@ -231,11 +231,28 @@ docker run -d \
 ### Setup Decoder
 
-Using `preset-nvidia` ffmpeg will automatically select the necessary profile for the incoming video, and will log an error if the profile is not supported by your GPU.
+The decoder you need to pass in the `hwaccel_args` will depend on the input video.
+
+A list of supported codecs (you can use `ffmpeg -decoders | grep cuvid` in the container to get the ones your card supports)
+
+```
+ V..... h263_cuvid            Nvidia CUVID H263 decoder (codec h263)
+ V..... h264_cuvid            Nvidia CUVID H264 decoder (codec h264)
+ V..... hevc_cuvid            Nvidia CUVID HEVC decoder (codec hevc)
+ V..... mjpeg_cuvid           Nvidia CUVID MJPEG decoder (codec mjpeg)
+ V..... mpeg1_cuvid           Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)
+ V..... mpeg2_cuvid           Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)
+ V..... mpeg4_cuvid           Nvidia CUVID MPEG4 decoder (codec mpeg4)
+ V..... vc1_cuvid             Nvidia CUVID VC1 decoder (codec vc1)
+ V..... vp8_cuvid             Nvidia CUVID VP8 decoder (codec vp8)
+ V..... vp9_cuvid             Nvidia CUVID VP9 decoder (codec vp9)
+```
+
+For example, for H264 video, you'll select `preset-nvidia-h264`.
 
 ```yaml
 ffmpeg:
-  hwaccel_args: preset-nvidia
+  hwaccel_args: preset-nvidia-h264
 ```
 
 If everything is working correctly, you should see a significant improvement in performance.

View File

@@ -203,13 +203,14 @@ detectors:
   ov:
     type: openvino
     device: AUTO
+    model:
+      path: /openvino-model/ssdlite_mobilenet_v2.xml
 
 model:
   width: 300
   height: 300
   input_tensor: nhwc
   input_pixel_format: bgr
-  path: /openvino-model/ssdlite_mobilenet_v2.xml
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 
 record:

View File

@@ -23,13 +23,13 @@ If you are using go2rtc, you should adjust the following settings in your camera
 - Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_. as these non-standard codecs remove keyframes (see below).
 - Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
-- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.
+- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
 
 The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.
 
 ### Audio Support
 
-MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
+MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
 
 ```yaml
 go2rtc:

View File

@@ -92,16 +92,10 @@ motion:
   lightning_threshold: 0.8
 ```
 
-:::warning
+:::tip
 
 Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
 
 :::
 
-:::note
-
-Lightning threshold does not stop motion based recordings from being saved.
-
-:::
-
 Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in no motion detection. This is done via the `lightning_threshold` configuration. It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.

View File

@@ -22,8 +22,8 @@ Frigate supports multiple different detectors that work on different types of ha
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.

**Nvidia**

- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.

**Rockchip**

- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
@@ -144,9 +144,7 @@ detectors:
#### SSDLite MobileNet v2

An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
@@ -225,7 +223,7 @@ The model used for TensorRT must be preprocessed on the same hardware platform t
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.

By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.

If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
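As a sketch of the DLA naming described above (the compose fragment style mirrors the example further down; the model name is illustrative):

```yaml
frigate:
  environment:
    - YOLO_MODELS=yolov7-320-dla # generate a model that runs on DLA0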
@@ -256,7 +254,6 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@@ -267,7 +264,7 @@ An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yol
```yml
frigate:
  environment:
    - YOLO_MODELS=yolov7-320,yolov7x-640
    - USE_FP16=false
```
@@ -285,8 +282,6 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
  tensorrt:
@@ -420,24 +415,6 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
ONNX is an open format for building machine learning models. Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.
:::info
If the correct build is used for your GPU then the GPU will be detected and used automatically.
- **AMD**
- ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
:::
:::tip

When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
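The example config itself is elided by the hunk above; as a sketch (the detector names are arbitrary, and `type: onnx` is assumed from this section):

```yaml
detectors:
  onnx_0:
    type: onnx
  onnx_1:
    type: onnx
```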
@@ -480,7 +457,6 @@ model:
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  input_tensor: nchw
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```
@@ -506,12 +482,11 @@ detectors:
  cpu1:
    type: cpu
    num_threads: 3
    model:
      path: "/custom_model.tflite"
  cpu2:
    type: cpu
    num_threads: 3
    model:
      path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@@ -638,6 +613,8 @@ detectors:
  hailo8l:
    type: hailo8l
    device: PCIe

model:
  width: 300
@@ -645,5 +622,4 @@ model:
  input_tensor: nhwc
  input_pixel_format: bgr
  model_type: ssd
  path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```


@@ -5,7 +5,7 @@ title: Available Objects
import labels from "../../../labelmap.txt";

Frigate includes the object labels listed below from the Google Coral test data.

Please note:


@@ -52,7 +52,7 @@ detectors:
  # Required: name of the detector
  detector_name:
    # Required: type of the detector
    # Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
    # Additional detector types can also be plugged in.
    # Detectors may require additional configuration.
    # Refer to the Detectors configuration page for more information.
@@ -117,27 +117,25 @@ auth:
  hash_iterations: 600000

# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
  # Required: path to the model (default: automatic based on detector)
  path: /edgetpu_model.tflite
  # Required: path to the labelmap (default: shown below)
  labelmap_path: /labelmap.txt
  # Required: Object detection model input width (default: shown below)
  width: 320
  # Required: Object detection model input height (default: shown below)
  height: 320
  # Required: Object detection model input colorspace
  # Valid values are rgb, bgr, or yuv. (default: shown below)
  input_pixel_format: rgb
  # Required: Object detection model input tensor format
  # Valid values are nhwc or nchw (default: shown below)
  input_tensor: nhwc
  # Required: Object detection model type, currently only used with the OpenVINO detector
  # Valid values are ssd, yolox, yolonas (default: shown below)
  model_type: ssd
  # Required: Label name modifications. These are merged into the standard labelmap.
  labelmap:
    2: vehicle
  # Optional: Map of object labels to their attribute labels (default: depends on model)
@@ -550,12 +548,10 @@ genai:
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
go2rtc:

# Optional: Live stream configuration for WebUI.
# NOTE: Can be overridden at the camera level
live:
  # Optional: Set the name of the stream configured in go2rtc
  # that should be used for live view in frigate WebUI. (default: name of camera)
  # NOTE: In most cases this should be set at the camera level only.
  stream_name: camera_name
  # Optional: Set the height of the jsmpeg stream. (default: 720)
  # This must be less than or equal to the height of the detect stream. Lower resolutions
@@ -688,7 +684,6 @@ cameras:
# to enable PTZ controls.
onvif:
  # Required: host of the camera being connected to.
  # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
  host: 0.0.0.0
  # Optional: ONVIF port for device (default: shown below).
  port: 8000
@@ -697,8 +692,6 @@ cameras:
  user: admin
  # Optional: password for login.
  password: admin
  # Optional: Skip TLS verification from the ONVIF server (default: shown below)
  tls_insecure: False
  # Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
  # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
  ignore_time_mismatch: False
@@ -762,8 +755,6 @@ cameras:
    - cat
  # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
  required_zones: []
  # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
  debug_save_thumbnails: False

# Optional
ui:


@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.
:::note
@@ -132,31 +132,9 @@ cameras:
- detect
```
## Handling Complex Passwords

go2rtc expects URL-encoded passwords in the config; [urlencoder.org](https://urlencoder.org) can be used for this purpose.

For example:

```yaml
go2rtc:
  streams:
    my_camera: rtsp://username:$@foo%@192.168.1.100
```

becomes

```yaml
go2rtc:
  streams:
    my_camera: rtsp://username:$%40foo%25@192.168.1.100
```

See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

NOTE: The output will need to be passed with two curly braces `{{output}}`
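The example block itself is cut off in this view; a sketch of an exec source (the ffmpeg command and media file path are placeholders):

```yaml
go2rtc:
  streams:
    stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
```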


@@ -5,7 +5,7 @@ title: Using Semantic Search
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.

Semantic Search is accessed via the _Explore_ view in the Frigate UI.
@@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
## Configuration

Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.

```yaml
semantic_search:
@@ -29,9 +29,9 @@ semantic_search:
:::tip

The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.

If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.

:::
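A minimal sketch of the tip above (both option names come from this page's own text):

```yaml
semantic_search:
  enabled: True
  reindex: True # set back to False after the one-time re-index completes
```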
@@ -39,9 +39,15 @@ If you are enabling Semantic Search for the first time, be advised that Frigate
The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:
```yaml
semantic_search:
@@ -50,41 +56,11 @@ semantic_search:
```

- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.
```yaml
semantic_search:
  enabled: True
  model_size: large
```
:::info
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
- **AMD**
- ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
:::
## Usage and Best Practices

1. Semantic Search is used in conjunction with the other filters available on the Explore page. Use a combination of traditional filtering and Semantic Search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".


@@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:
- Encode Mode: H.264
- Resolution: 2688\*1520
- Frame Rate(FPS): 15
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)

**Sub Stream (Detection)**


@@ -81,15 +81,15 @@ You can calculate the **minimum** shm size for each camera with the following fo
```console
# Replace <width> and <height>
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'

# Example for 1280x720, including logs
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576 + 40))'
66.62MB

# Example for eight cameras detecting at 1280x720, including logs
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
252.99MB
```
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
@@ -193,9 +193,8 @@ services:
    container_name: frigate
    privileged: true # this may not be necessary for all setups
    restart: unless-stopped
    stop_grace_period: 30s # allow enough time to shut down the various services
    image: ghcr.io/blakeblackshear/frigate:stable
    shm_size: "512mb" # update for your cameras based on calculation above
    devices:
      - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
@@ -225,7 +224,6 @@ If you can't use docker compose, you can run the container with something simila
docker run -d \
  --name frigate \
  --restart=unless-stopped \
  --stop-timeout 30 \
  --mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
  --device /dev/bus/usb:/dev/bus/usb \
  --device /dev/dri/renderD128 \


@@ -13,15 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
# Setup a go2rtc stream

First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.
:::tip
For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera.
See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information.
:::
```yaml
go2rtc:
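  # (the rest of this example is truncated in this diff view)
  # A hedged sketch -- camera name and RTSP URL are placeholders -- naming the
  # stream after the Frigate camera, per the tip above:
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```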
@@ -47,8 +39,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:
  - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation.
  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml
go2rtc:
  streams:


@@ -115,7 +115,6 @@ services:
  frigate:
    container_name: frigate
    restart: unless-stopped
    stop_grace_period: 30s
    image: ghcr.io/blakeblackshear/frigate:stable
    volumes:
      - ./config:/config
@@ -307,9 +306,7 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
### Step 7: Complete config

At this point you have a complete config with basic functionality.
- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples.
- View [full config reference](../configuration/reference.md) for a complete list of configuration options.
### Follow up


@@ -94,18 +94,6 @@ Message published for each changed tracked object. The first message is publishe
}
```
### `frigate/tracked_object_update`
Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
```json
{
  "type": "description",
  "id": "1607123955.475377-mxklsc",
  "description": "The car is a red sedan moving away from the camera."
}
```
### `frigate/reviews`

Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended a final `end` message is published.


@@ -5,7 +5,7 @@ title: Requesting your first model
## Step 1: Upload and annotate your images

Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify, the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.

It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
@@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your
## Step 2: Submit a model request

Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.

![Plus Models Page](/img/plus/plus-models.jpg)

## Step 3: Set your model id in the config
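The body of this step is elided in this view; as a sketch (the id is a placeholder), the model id is referenced with a `plus://` path under `model` in the Frigate config:

```yaml
model:
  path: plus://<your_model_id> # replace with the id shown on the Frigate+ Models page
```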


@@ -3,7 +3,7 @@ id: improving_model
title: Improving your model
---

You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
@@ -36,17 +36,18 @@ Misidentified objects should have a correct label added. For example, if a perso
## Shortcuts for a faster workflow

| Shortcut Key      | Description                   |
| ----------------- | ----------------------------- |
| `?`               | Show all keyboard shortcuts   |
| `w`               | Add box                       |
| `d`               | Toggle difficult              |
| `s`               | Switch to the next label      |
| `tab`             | Select next largest box       |
| `del`             | Delete current box            |
| `esc`             | Deselect/Cancel               |
| `← ↑ → ↓`         | Move box                      |
| `Shift + ← ↑ → ↓` | Resize box                    |
| `scrollwheel`     | Zoom in/out                   |
| `f`               | Hide/show all but current box |
| `spacebar`        | Verify and save               |


@@ -15,36 +15,17 @@ With a subscription, 12 model trainings per year are included. If you cancel you
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
## Available model types
There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
| Model Type | Description |
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors.
:::warning

Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.

:::

| Hardware | Recommended Detector Type | Recommended Model Type |
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
_\* Requires Frigate 0.15_
## Available label types


@@ -49,10 +49,7 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n
## PCIe Coral Not Detected

The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on the OS and kernel being run.
- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU


@@ -98,11 +98,3 @@ docker run -d \
  -p 8555:8555/udp \
  ghcr.io/blakeblackshear/frigate:stable
```
### My RTSP stream works fine in VLC, but it does not work when I put the same URL in my Frigate config. Is this a bug?
No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC automatically switches between UDP and TCP depending on network conditions and stream availability. So a stream that works in VLC but not in Frigate is likely due to VLC selecting UDP as the transfer protocol.
TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.
You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
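A sketch of that preset in use (camera name and URL are placeholders):

```yaml
cameras:
  my_camera:
    ffmpeg:
      inputs:
        - path: rtsp://192.168.1.5:554/live0
          input_args: preset-rtsp-udp # force UDP instead of the TCP default
          roles:
            - detect
```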

docs/package-lock.json (generated): file diff suppressed because it is too large.

@@ -17,15 +17,15 @@
"write-heading-ids": "docusaurus write-heading-ids" "write-heading-ids": "docusaurus write-heading-ids"
}, },
"dependencies": { "dependencies": {
"@docusaurus/core": "^3.6.3", "@docusaurus/core": "^3.5.2",
"@docusaurus/preset-classic": "^3.6.3", "@docusaurus/preset-classic": "^3.5.2",
"@docusaurus/theme-mermaid": "^3.6.3", "@docusaurus/theme-mermaid": "^3.5.2",
"@docusaurus/plugin-content-docs": "^3.6.3", "@docusaurus/plugin-content-docs": "^3.5.2",
"@mdx-js/react": "^3.1.0", "@mdx-js/react": "^3.0.1",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.3.1", "docusaurus-plugin-openapi-docs": "^4.1.0",
"docusaurus-theme-openapi-docs": "^4.3.1", "docusaurus-theme-openapi-docs": "^4.1.0",
"prism-react-renderer": "^2.4.1", "prism-react-renderer": "^2.4.0",
"raw-loader": "^4.0.2", "raw-loader": "^4.0.2",
"react": "^18.3.1", "react": "^18.3.1",
"react-dom": "^18.3.1" "react-dom": "^18.3.1"


@@ -26,7 +26,7 @@ const sidebars: SidebarsConfig = {
    {
      type: 'link',
      label: 'Go2RTC Configuration Reference',
      href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
    } as PropSidebarItemLink,
  ],
  Detectors: [

File diff suppressed because it is too large.


@@ -17,17 +17,17 @@ from fastapi.responses import JSONResponse, PlainTextResponse
from markupsafe import escape
from peewee import operator

from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.models import Event, Timeline
from frigate.util.builtin import (
    clean_camera_user_pass,
    get_tz_modifiers,
    update_yaml_from_url,
)
from frigate.util.config import find_config_file
from frigate.util.services import (
    ffprobe_stream,
    get_nvidia_driver_info,
@@ -134,27 +134,9 @@ def config(request: Request):
        for zone_name, zone in config_obj.cameras[camera_name].zones.items():
            camera_dict["zones"][zone_name]["color"] = zone.color

    # remove go2rtc stream passwords
    go2rtc: dict[str, any] = config_obj.go2rtc.model_dump(
        mode="json", warnings="none", exclude_none=True
    )

    for stream_name, stream in go2rtc.get("streams", {}).items():
        if stream is None:
            continue

        if isinstance(stream, str):
            cleaned = clean_camera_user_pass(stream)
        else:
            cleaned = []

            for item in stream:
                cleaned.append(clean_camera_user_pass(item))

        config["go2rtc"]["streams"][stream_name] = cleaned

    config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
    config["model"]["colormap"] = config_obj.model.colormap

    # use merged labelmap
    for detector_config in config["detectors"].values():
        detector_config["model"]["labelmap"] = (
            request.app.frigate_config.model.merged_labelmap
@@ -165,7 +147,13 @@ def config(request: Request):
@router.get("/config/raw") @router.get("/config/raw")
def config_raw(): def config_raw():
config_file = find_config_file() config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
if not os.path.isfile(config_file): if not os.path.isfile(config_file):
return JSONResponse( return JSONResponse(
@@ -210,7 +198,13 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
    # Save the config to file
    try:
        config_file = find_config_file()

        with open(config_file, "w") as f:
            f.write(new_config)
@@ -259,7 +253,13 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
@router.put("/config/set") @router.put("/config/set")
def config_set(request: Request, body: AppConfigSetBody): def config_set(request: Request, body: AppConfigSetBody):
config_file = find_config_file() config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
with open(config_file, "r") as f: with open(config_file, "r") as f:
old_raw_config = f.read() old_raw_config = f.read()


@@ -18,7 +18,7 @@ from joserfc import jwt
from peewee import DoesNotExist
from slowapi import Limiter

from frigate.api.defs.request.app_body import (
    AppPostLoginBody,
    AppPostUsersBody,
    AppPutPasswordBody,
@@ -85,12 +85,7 @@ def get_remote_addr(request: Request):
        return str(ip)

    # if there wasn't anything in the route, just return the default
    remote_addr = None

    if hasattr(request, "remote_addr"):
        remote_addr = request.remote_addr

    return remote_addr or "127.0.0.1"


def get_jwt_secret() -> str:
@@ -329,7 +324,7 @@ def login(request: Request, body: AppPostLoginBody):
    try:
        db_user: User = User.get_by_id(user)
    except DoesNotExist:
        return JSONResponse(content={"message": "Login failed"}, status_code=401)

    password_hash = db_user.password_hash

    if verify_password(password, password_hash):
@@ -340,7 +335,7 @@ def login(request: Request, body: AppPostLoginBody):
            response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
        )
        return response

    return JSONResponse(content={"message": "Login failed"}, status_code=401)


@router.get("/users")


@@ -1,4 +1,4 @@
from typing import List, Optional, Union
from pydantic import BaseModel, Field
@@ -17,18 +17,14 @@ class EventsDescriptionBody(BaseModel):
class EventsCreateBody(BaseModel):
    source_type: Optional[str] = "api"
    sub_label: Optional[str] = None
    score: Optional[float] = 0
    duration: Optional[int] = 30
    include_recording: Optional[bool] = True
    draw: Optional[dict] = {}


class EventsEndBody(BaseModel):
    end_time: Optional[float] = None


class EventsDeleteBody(BaseModel):
    event_ids: List[str] = Field(title="The event IDs to delete")


class SubmitPlusBody(BaseModel):

@@ -28,7 +28,6 @@ class EventsQueryParams(BaseModel):
    is_submitted: Optional[int] = None
    min_length: Optional[float] = None
    max_length: Optional[float] = None
    event_id: Optional[str] = None
    sort: Optional[str] = None
    timezone: Optional[str] = "utc"
@@ -47,7 +46,6 @@ class EventsSearchQueryParams(BaseModel):
time_range: Optional[str] = DEFAULT_TIME_RANGE time_range: Optional[str] = DEFAULT_TIME_RANGE
has_clip: Optional[bool] = None has_clip: Optional[bool] = None
has_snapshot: Optional[bool] = None has_snapshot: Optional[bool] = None
is_submitted: Optional[bool] = None
timezone: Optional[str] = "utc" timezone: Optional[str] = "utc"
min_score: Optional[float] = None min_score: Optional[float] = None
max_score: Optional[float] = None max_score: Optional[float] = None

View File

@@ -1,31 +0,0 @@
from typing import Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
from frigate.review.types import SeverityEnum
class ReviewQueryParams(BaseModel):
cameras: str = "all"
labels: str = "all"
zones: str = "all"
reviewed: int = 0
limit: Union[int, SkipJsonSchema[None]] = None
severity: Union[SeverityEnum, SkipJsonSchema[None]] = None
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
class ReviewSummaryQueryParams(BaseModel):
cameras: str = "all"
labels: str = "all"
zones: str = "all"
timezone: str = "utc"
class ReviewActivityMotionQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30
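Note: the replacement models trade `Optional[X]` for `Union[X, SkipJsonSchema[None]]`. Runtime behavior is unchanged (the field may still be omitted), but the generated OpenAPI schema no longer advertises a null branch. A small check, assuming pydantic v2:

```python
from typing import Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema

class Params(BaseModel):
    limit: Union[int, SkipJsonSchema[None]] = None

print(Params().limit)  # None — still optional at runtime
print(Params.model_json_schema()["properties"]["limit"])
# Roughly: {'default': None, 'title': 'Limit', 'type': 'integer'} — no null type
```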

View File

@@ -1,20 +0,0 @@
from typing import Union
from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema
from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
)
class ExportRecordingsBody(BaseModel):
playback: PlaybackFactorEnum = Field(
default=PlaybackFactorEnum.realtime, title="Playback factor"
)
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: str = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None

View File

@@ -1,6 +0,0 @@
from pydantic import BaseModel, conlist, constr
class ReviewModifyMultipleBody(BaseModel):
# List of strings with at least one element, each with at least one character
ids: conlist(constr(min_length=1), min_length=1)

View File

@@ -1,42 +0,0 @@
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict
class EventResponse(BaseModel):
id: str
label: str
sub_label: Optional[str]
camera: str
start_time: float
end_time: Optional[float]
false_positive: Optional[bool]
zones: list[str]
thumbnail: str
has_clip: bool
has_snapshot: bool
retain_indefinitely: bool
plus_id: Optional[str]
model_hash: Optional[str]
detector_type: Optional[str]
model_type: Optional[str]
data: dict[str, Any]
model_config = ConfigDict(protected_namespaces=())
class EventCreateResponse(BaseModel):
success: bool
message: str
event_id: str
class EventMultiDeleteResponse(BaseModel):
success: bool
deleted_events: list[str]
not_found_events: list[str]
class EventUploadPlusResponse(BaseModel):
success: bool
plus_id: str
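Note: these response models back the `response_model=` annotations added to the routers later in this diff. Beyond documenting the API, FastAPI uses them to validate and filter what actually leaves the server; a minimal sketch:

```python
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class EventCreateResponse(BaseModel):
    success: bool
    message: str
    event_id: str

@app.post("/demo/create", response_model=EventCreateResponse)
def create_demo_event():
    # The undeclared "internal" key is stripped from the response, and a
    # missing declared field would raise a server-side validation error.
    return {"success": True, "message": "ok", "event_id": "abc123", "internal": 42}
```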

View File

@@ -1,6 +0,0 @@
from pydantic import BaseModel
class GenericResponse(BaseModel):
success: bool
message: str

View File

@@ -1,43 +0,0 @@
from datetime import datetime
from typing import Dict
from pydantic import BaseModel, Json
from frigate.review.types import SeverityEnum
class ReviewSegmentResponse(BaseModel):
id: str
camera: str
start_time: datetime
end_time: datetime
has_been_reviewed: bool
severity: SeverityEnum
thumb_path: str
data: Json
class Last24HoursReview(BaseModel):
reviewed_alert: int
reviewed_detection: int
total_alert: int
total_detection: int
class DayReview(BaseModel):
day: datetime
reviewed_alert: int
reviewed_detection: int
total_alert: int
total_detection: int
class ReviewSummaryResponse(BaseModel):
last24Hours: Last24HoursReview
root: Dict[str, DayReview]
class ReviewActivityMotionResponse(BaseModel):
start_time: int
motion: float
camera: str

View File

@@ -0,0 +1,28 @@
from typing import Optional
from pydantic import BaseModel
class ReviewQueryParams(BaseModel):
cameras: Optional[str] = "all"
labels: Optional[str] = "all"
zones: Optional[str] = "all"
reviewed: Optional[int] = 0
limit: Optional[int] = None
severity: Optional[str] = None
before: Optional[float] = None
after: Optional[float] = None
class ReviewSummaryQueryParams(BaseModel):
cameras: Optional[str] = "all"
labels: Optional[str] = "all"
zones: Optional[str] = "all"
timezone: Optional[str] = "utc"
class ReviewActivityMotionQueryParams(BaseModel):
cameras: Optional[str] = "all"
before: Optional[float] = None
after: Optional[float] = None
scale: Optional[int] = 30

View File

@@ -14,36 +14,29 @@ from fastapi.responses import JSONResponse
from peewee import JOIN, DoesNotExist, fn, operator from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.defs.query.events_query_parameters import ( from frigate.api.defs.events_body import (
DEFAULT_TIME_RANGE,
EventsQueryParams,
EventsSearchQueryParams,
EventsSummaryQueryParams,
)
from frigate.api.defs.query.regenerate_query_parameters import (
RegenerateQueryParameters,
)
from frigate.api.defs.request.events_body import (
EventsCreateBody, EventsCreateBody,
EventsDeleteBody,
EventsDescriptionBody, EventsDescriptionBody,
EventsEndBody, EventsEndBody,
EventsSubLabelBody, EventsSubLabelBody,
SubmitPlusBody, SubmitPlusBody,
) )
from frigate.api.defs.response.event_response import ( from frigate.api.defs.events_query_parameters import (
EventCreateResponse, DEFAULT_TIME_RANGE,
EventMultiDeleteResponse, EventsQueryParams,
EventResponse, EventsSearchQueryParams,
EventUploadPlusResponse, EventsSummaryQueryParams,
)
from frigate.api.defs.regenerate_query_parameters import (
RegenerateQueryParameters,
) )
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR from frigate.const import (
CLIPS_DIR,
)
from frigate.embeddings import EmbeddingsContext from frigate.embeddings import EmbeddingsContext
from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, ReviewSegment, Timeline from frigate.models import Event, ReviewSegment, Timeline
from frigate.object_processing import TrackedObject, TrackedObjectProcessor from frigate.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -51,7 +44,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.events]) router = APIRouter(tags=[Tags.events])
@router.get("/events", response_model=list[EventResponse]) @router.get("/events")
def events(params: EventsQueryParams = Depends()): def events(params: EventsQueryParams = Depends()):
camera = params.camera camera = params.camera
cameras = params.cameras cameras = params.cameras
@@ -95,7 +88,6 @@ def events(params: EventsQueryParams = Depends()):
is_submitted = params.is_submitted is_submitted = params.is_submitted
min_length = params.min_length min_length = params.min_length
max_length = params.max_length max_length = params.max_length
event_id = params.event_id
sort = params.sort sort = params.sort
@@ -238,9 +230,6 @@ def events(params: EventsQueryParams = Depends()):
elif is_submitted > 0: elif is_submitted > 0:
clauses.append((Event.plus_id != "")) clauses.append((Event.plus_id != ""))
if event_id is not None:
clauses.append((Event.id == event_id))
if len(clauses) == 0: if len(clauses) == 0:
clauses.append((True)) clauses.append((True))
@@ -253,8 +242,6 @@ def events(params: EventsQueryParams = Depends()):
order_by = Event.start_time.asc() order_by = Event.start_time.asc()
elif sort == "date_desc": elif sort == "date_desc":
order_by = Event.start_time.desc() order_by = Event.start_time.desc()
else:
order_by = Event.start_time.desc()
else: else:
order_by = Event.start_time.desc() order_by = Event.start_time.desc()
@@ -270,7 +257,7 @@ def events(params: EventsQueryParams = Depends()):
return JSONResponse(content=list(events)) return JSONResponse(content=list(events))
@router.get("/events/explore", response_model=list[EventResponse]) @router.get("/events/explore")
def events_explore(limit: int = 10): def events_explore(limit: int = 10):
# get distinct labels for all events # get distinct labels for all events
distinct_labels = Event.select(Event.label).distinct().order_by(Event.label) distinct_labels = Event.select(Event.label).distinct().order_by(Event.label)
@@ -315,8 +302,7 @@ def events_explore(limit: int = 10):
"data": { "data": {
k: v k: v
for k, v in event.data.items() for k, v in event.data.items()
if k if k in ["type", "score", "top_score", "description"]
in ["type", "score", "top_score", "description", "sub_label_score"]
}, },
"event_count": label_counts[event.label], "event_count": label_counts[event.label],
} }
@@ -332,7 +318,7 @@ def events_explore(limit: int = 10):
return JSONResponse(content=processed_events) return JSONResponse(content=processed_events)
@router.get("/event_ids", response_model=list[EventResponse]) @router.get("/event_ids")
def event_ids(ids: str): def event_ids(ids: str):
ids = ids.split(",") ids = ids.split(",")
@@ -370,7 +356,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
time_range = params.time_range time_range = params.time_range
has_clip = params.has_clip has_clip = params.has_clip
has_snapshot = params.has_snapshot has_snapshot = params.has_snapshot
is_submitted = params.is_submitted
# for similarity search # for similarity search
event_id = params.event_id event_id = params.event_id
@@ -409,7 +394,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
Event.end_time, Event.end_time,
Event.has_clip, Event.has_clip,
Event.has_snapshot, Event.has_snapshot,
Event.top_score,
Event.data, Event.data,
Event.plus_id, Event.plus_id,
ReviewSegment.thumb_path, ReviewSegment.thumb_path,
@@ -452,12 +436,6 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
if has_snapshot is not None: if has_snapshot is not None:
event_filters.append((Event.has_snapshot == has_snapshot)) event_filters.append((Event.has_snapshot == has_snapshot))
if is_submitted is not None:
if is_submitted == 0:
event_filters.append((Event.plus_id.is_null()))
elif is_submitted > 0:
event_filters.append((Event.plus_id != ""))
if min_score is not None and max_score is not None: if min_score is not None and max_score is not None:
event_filters.append((Event.data["score"].between(min_score, max_score))) event_filters.append((Event.data["score"].between(min_score, max_score)))
else: else:
@@ -590,17 +568,19 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
processed_events.append(processed_event) processed_events.append(processed_event)
if (sort is None or sort == "relevance") and search_results: # Sort by search distance if search_results are available, otherwise by start_time as default
if search_results:
processed_events.sort(key=lambda x: x.get("search_distance", float("inf"))) processed_events.sort(key=lambda x: x.get("search_distance", float("inf")))
elif min_score is not None and max_score is not None and sort == "score_asc":
processed_events.sort(key=lambda x: x["score"])
elif min_score is not None and max_score is not None and sort == "score_desc":
processed_events.sort(key=lambda x: x["score"], reverse=True)
elif sort == "date_asc":
processed_events.sort(key=lambda x: x["start_time"])
else: else:
# "date_desc" default if sort == "score_asc":
processed_events.sort(key=lambda x: x["start_time"], reverse=True) processed_events.sort(key=lambda x: x["score"])
elif sort == "score_desc":
processed_events.sort(key=lambda x: x["score"], reverse=True)
elif sort == "date_asc":
processed_events.sort(key=lambda x: x["start_time"])
else:
# "date_desc" default
processed_events.sort(key=lambda x: x["start_time"], reverse=True)
# Limit the number of events returned # Limit the number of events returned
processed_events = processed_events[:limit] processed_events = processed_events[:limit]
@@ -653,7 +633,7 @@ def events_summary(params: EventsSummaryQueryParams = Depends()):
return JSONResponse(content=[e for e in groups.dicts()]) return JSONResponse(content=[e for e in groups.dicts()])
@router.get("/events/{event_id}", response_model=EventResponse) @router.get("/events/{event_id}")
def event(event_id: str): def event(event_id: str):
try: try:
return model_to_dict(Event.get(Event.id == event_id)) return model_to_dict(Event.get(Event.id == event_id))
@@ -661,7 +641,7 @@ def event(event_id: str):
return JSONResponse(content="Event not found", status_code=404) return JSONResponse(content="Event not found", status_code=404)
@router.post("/events/{event_id}/retain", response_model=GenericResponse) @router.post("/events/{event_id}/retain")
def set_retain(event_id: str): def set_retain(event_id: str):
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
@@ -680,7 +660,7 @@ def set_retain(event_id: str):
) )
@router.post("/events/{event_id}/plus", response_model=EventUploadPlusResponse) @router.post("/events/{event_id}/plus")
def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = None): def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = None):
if not request.app.frigate_config.plus_api.is_active(): if not request.app.frigate_config.plus_api.is_active():
message = "PLUS_API_KEY environment variable is not set" message = "PLUS_API_KEY environment variable is not set"
@@ -792,7 +772,7 @@ def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = None):
) )
@router.put("/events/{event_id}/false_positive", response_model=EventUploadPlusResponse) @router.put("/events/{event_id}/false_positive")
def false_positive(request: Request, event_id: str): def false_positive(request: Request, event_id: str):
if not request.app.frigate_config.plus_api.is_active(): if not request.app.frigate_config.plus_api.is_active():
message = "PLUS_API_KEY environment variable is not set" message = "PLUS_API_KEY environment variable is not set"
@@ -881,7 +861,7 @@ def false_positive(request: Request, event_id: str):
) )
@router.delete("/events/{event_id}/retain", response_model=GenericResponse) @router.delete("/events/{event_id}/retain")
def delete_retain(event_id: str): def delete_retain(event_id: str):
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
@@ -900,7 +880,7 @@ def delete_retain(event_id: str):
) )
@router.post("/events/{event_id}/sub_label", response_model=GenericResponse) @router.post("/events/{event_id}/sub_label")
def set_sub_label( def set_sub_label(
request: Request, request: Request,
event_id: str, event_id: str,
@@ -952,7 +932,7 @@ def set_sub_label(
) )
@router.post("/events/{event_id}/description", response_model=GenericResponse) @router.post("/events/{event_id}/description")
def set_description( def set_description(
request: Request, request: Request,
event_id: str, event_id: str,
@@ -999,7 +979,7 @@ def set_description(
) )
@router.put("/events/{event_id}/description/regenerate", response_model=GenericResponse) @router.put("/events/{event_id}/description/regenerate")
def regenerate_description( def regenerate_description(
request: Request, event_id: str, params: RegenerateQueryParameters = Depends() request: Request, event_id: str, params: RegenerateQueryParameters = Depends()
): ):
@@ -1011,11 +991,9 @@ def regenerate_description(
status_code=404, status_code=404,
) )
camera_config = request.app.frigate_config.cameras[event.camera]
if ( if (
request.app.frigate_config.semantic_search.enabled request.app.frigate_config.semantic_search.enabled
and camera_config.genai.enabled and request.app.frigate_config.genai.enabled
): ):
request.app.event_metadata_updater.publish((event.id, params.source)) request.app.event_metadata_updater.publish((event.id, params.source))
@@ -1036,74 +1014,47 @@ def regenerate_description(
content=( content=(
{ {
"success": False, "success": False,
"message": "Semantic Search and Generative AI must be enabled to regenerate a description", "message": "Semantic search and generative AI are not enabled",
} }
), ),
status_code=400, status_code=400,
) )
def delete_single_event(event_id: str, request: Request) -> dict: @router.delete("/events/{event_id}")
def delete_event(request: Request, event_id: str):
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
except DoesNotExist: except DoesNotExist:
return {"success": False, "message": f"Event {event_id} not found"} return JSONResponse(
content=({"success": False, "message": "Event " + event_id + " not found"}),
status_code=404,
)
media_name = f"{event.camera}-{event.id}" media_name = f"{event.camera}-{event.id}"
if event.has_snapshot: if event.has_snapshot:
snapshot_paths = [ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg"), media.unlink(missing_ok=True)
Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"), media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
] media.unlink(missing_ok=True)
for media in snapshot_paths: if event.has_clip:
media.unlink(missing_ok=True) media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media.unlink(missing_ok=True)
event.delete_instance() event.delete_instance()
Timeline.delete().where(Timeline.source_id == event_id).execute() Timeline.delete().where(Timeline.source_id == event_id).execute()
# If semantic search is enabled, update the index # If semantic search is enabled, update the index
if request.app.frigate_config.semantic_search.enabled: if request.app.frigate_config.semantic_search.enabled:
context: EmbeddingsContext = request.app.embeddings context: EmbeddingsContext = request.app.embeddings
context.db.delete_embeddings_thumbnail(event_ids=[event_id]) context.db.delete_embeddings_thumbnail(event_ids=[event_id])
context.db.delete_embeddings_description(event_ids=[event_id]) context.db.delete_embeddings_description(event_ids=[event_id])
return JSONResponse(
return {"success": True, "message": f"Event {event_id} deleted"} content=({"success": True, "message": "Event " + event_id + " deleted"}),
status_code=200,
)
@router.delete("/events/{event_id}", response_model=GenericResponse) @router.post("/events/{camera_name}/{label}/create")
def delete_event(request: Request, event_id: str):
result = delete_single_event(event_id, request)
status_code = 200 if result["success"] else 404
return JSONResponse(content=result, status_code=status_code)
@router.delete("/events/", response_model=EventMultiDeleteResponse)
def delete_events(request: Request, body: EventsDeleteBody):
if not body.event_ids:
return JSONResponse(
content=({"success": False, "message": "No event IDs provided."}),
status_code=404,
)
deleted_events = []
not_found_events = []
for event_id in body.event_ids:
result = delete_single_event(event_id, request)
if result["success"]:
deleted_events.append(event_id)
else:
not_found_events.append(event_id)
response = {
"success": True,
"deleted_events": deleted_events,
"not_found_events": not_found_events,
}
return JSONResponse(content=response, status_code=200)
@router.post("/events/{camera_name}/{label}/create", response_model=EventCreateResponse)
def create_event( def create_event(
request: Request, request: Request,
camera_name: str, camera_name: str,
@@ -1125,11 +1076,9 @@ def create_event(
) )
try: try:
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor frame = request.app.detected_frames_processor.get_current_frame(camera_name)
external_processor: ExternalEventProcessor = request.app.external_processor
frame = frame_processor.get_current_frame(camera_name) event_id = request.app.external_processor.create_manual_event(
event_id = external_processor.create_manual_event(
camera_name, camera_name,
label, label,
body.source_type, body.source_type,
@@ -1159,7 +1108,7 @@ def create_event(
) )
@router.put("/events/{event_id}/end", response_model=GenericResponse) @router.put("/events/{event_id}/end")
def end_event(request: Request, event_id: str, body: EventsEndBody): def end_event(request: Request, event_id: str, body: EventsEndBody):
try: try:
end_time = body.end_time or datetime.datetime.now().timestamp() end_time = body.end_time or datetime.datetime.now().timestamp()
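Note: the new `DELETE /events/` route accepts an `EventsDeleteBody` and reports per-ID results, so clients can clear many events in one round trip instead of looping over `DELETE /events/{event_id}`. A hypothetical client call (base URL and event IDs are invented):

```python
import requests

FRIGATE = "http://frigate.local:5000/api"  # hypothetical deployment URL

resp = requests.delete(
    f"{FRIGATE}/events/",
    json={"event_ids": ["1729500000.123456-aaaaaa", "1729500060.654321-bbbbbb"]},
)
print(resp.status_code)
print(resp.json())
# e.g. {"success": True, "deleted_events": [...], "not_found_events": [...]}
```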

View File

@@ -4,23 +4,17 @@ import logging
import random import random
import string import string
from pathlib import Path from pathlib import Path
from typing import Optional
import psutil import psutil
from fastapi import APIRouter, Request from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from peewee import DoesNotExist from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.const import EXPORT_DIR from frigate.const import EXPORT_DIR
from frigate.models import Export, Previews, Recordings from frigate.models import Export, Recordings
from frigate.record.export import ( from frigate.record.export import PlaybackFactorEnum, RecordingExporter
PlaybackFactorEnum,
PlaybackSourceEnum,
RecordingExporter,
)
from frigate.util.builtin import is_current_hour
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -39,7 +33,7 @@ def export_recording(
camera_name: str, camera_name: str,
start_time: float, start_time: float,
end_time: float, end_time: float,
body: ExportRecordingsBody, body: dict = None,
): ):
if not camera_name or not request.app.frigate_config.cameras.get(camera_name): if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
return JSONResponse( return JSONResponse(
@@ -49,52 +43,36 @@ def export_recording(
status_code=404, status_code=404,
) )
playback_factor = body.playback json: dict[str, any] = body or {}
playback_source = body.source playback_factor = json.get("playback", "realtime")
friendly_name = body.name friendly_name: Optional[str] = json.get("name")
existing_image = body.image_path
if playback_source == "recordings": if len(friendly_name or "") > 256:
recordings_count = ( return JSONResponse(
Recordings.select() content=({"success": False, "message": "File name is too long."}),
.where( status_code=401,
Recordings.start_time.between(start_time, end_time)
| Recordings.end_time.between(start_time, end_time)
| (
(start_time > Recordings.start_time)
& (end_time < Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.count()
) )
if recordings_count <= 0: existing_image = json.get("image_path")
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
else:
previews_count = (
Previews.select()
.where(
Previews.start_time.between(start_time, end_time)
| Previews.end_time.between(start_time, end_time)
| ((start_time > Previews.start_time) & (end_time < Previews.end_time))
)
.where(Previews.camera == camera_name)
.count()
)
if not is_current_hour(start_time) and previews_count <= 0: recordings_count = (
return JSONResponse( Recordings.select()
content=( .where(
{"success": False, "message": "No previews found for time range"} Recordings.start_time.between(start_time, end_time)
), | Recordings.end_time.between(start_time, end_time)
status_code=400, | ((start_time > Recordings.start_time) & (end_time < Recordings.end_time))
) )
.where(Recordings.camera == camera_name)
.count()
)
if recordings_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
exporter = RecordingExporter( exporter = RecordingExporter(
@@ -110,11 +88,6 @@ def export_recording(
if playback_factor in PlaybackFactorEnum.__members__.values() if playback_factor in PlaybackFactorEnum.__members__.values()
else PlaybackFactorEnum.realtime else PlaybackFactorEnum.realtime
), ),
(
PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings
),
) )
exporter.start() exporter.start()
return JSONResponse( return JSONResponse(
@@ -208,14 +181,3 @@ def export_delete(event_id: str):
), ),
status_code=200, status_code=200,
) )
@router.get("/exports/{export_id}")
def get_export(export_id: str):
try:
return JSONResponse(content=model_to_dict(Export.get(Export.id == export_id)))
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export not found"},
status_code=404,
)
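Note: the recordings/previews lookups above all use the same three-clause interval-overlap test. In plain Python the predicate looks like this (a sketch for clarity, not code from the repo):

```python
def overlaps(rec_start: float, rec_end: float, start: float, end: float) -> bool:
    # A recording counts if either of its endpoints falls inside the export
    # window, or the window sits entirely inside the recording.
    return (
        start <= rec_start <= end
        or start <= rec_end <= end
        or (rec_start < start and end < rec_end)
    )

assert overlaps(100, 200, 150, 160)      # window inside recording
assert overlaps(100, 200, 190, 250)      # recording ends inside window
assert not overlaps(100, 200, 300, 400)  # disjoint
```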

View File

@@ -82,16 +82,8 @@ def create_fastapi_app(
database.close() database.close()
return response return response
@app.on_event("startup")
async def startup():
logger.info("FastAPI started")
# Rate limiter (used for login endpoint) # Rate limiter (used for login endpoint)
if frigate_config.auth.failed_login_rate_limit is None: auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit or "")
limiter.enabled = False
else:
auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit)
app.state.limiter = limiter app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.add_middleware(SlowAPIMiddleware) app.add_middleware(SlowAPIMiddleware)
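Note: the new wiring disables the limiter outright when no `failed_login_rate_limit` is configured, instead of feeding slowapi an empty limit string as the old code did. A sketch of the pattern with a stand-in config value:

```python
from slowapi import Limiter
from slowapi.util import get_remote_address

failed_login_rate_limit = None  # stand-in for frigate_config.auth.failed_login_rate_limit

limiter = Limiter(key_func=get_remote_address)
if failed_login_rate_limit is None:
    # slowapi's kill switch: no limit configured means no limiting at all.
    limiter.enabled = False
else:
    limiter.enabled = True
    # the configured limit string (e.g. "5/minute") would then apply per-route
```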

View File

@@ -20,7 +20,7 @@ from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name from tzlocal import get_localzone_name
from frigate.api.defs.query.media_query_parameters import ( from frigate.api.defs.media_query_parameters import (
Extension, Extension,
MediaEventsSnapshotQueryParams, MediaEventsSnapshotQueryParams,
MediaLatestFrameQueryParams, MediaLatestFrameQueryParams,
@@ -36,7 +36,6 @@ from frigate.const import (
RECORD_DIR, RECORD_DIR,
) )
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording from frigate.util.image import get_image_from_recording
@@ -80,11 +79,7 @@ def mjpeg_feed(
def imagestream( def imagestream(
detected_frames_processor: TrackedObjectProcessor, detected_frames_processor, camera_name: str, fps: int, height: int, draw_options
camera_name: str,
fps: int,
height: int,
draw_options: dict[str, any],
): ):
while True: while True:
# max out at specified FPS # max out at specified FPS
@@ -123,7 +118,6 @@ def latest_frame(
extension: Extension, extension: Extension,
params: MediaLatestFrameQueryParams = Depends(), params: MediaLatestFrameQueryParams = Depends(),
): ):
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
draw_options = { draw_options = {
"bounding_boxes": params.bbox, "bounding_boxes": params.bbox,
"timestamp": params.timestamp, "timestamp": params.timestamp,
@@ -135,14 +129,17 @@ def latest_frame(
quality = params.quality quality = params.quality
if camera_name in request.app.frigate_config.cameras: if camera_name in request.app.frigate_config.cameras:
frame = frame_processor.get_current_frame(camera_name, draw_options) frame = request.app.detected_frames_processor.get_current_frame(
camera_name, draw_options
)
retry_interval = float( retry_interval = float(
request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10 or 10
) )
if frame is None or datetime.now().timestamp() > ( if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval request.app.detected_frames_processor.get_current_frame_time(camera_name)
+ retry_interval
): ):
if request.app.camera_error_image is None: if request.app.camera_error_image is None:
error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg") error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
@@ -183,7 +180,7 @@ def latest_frame(
) )
elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream: elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
frame = cv2.cvtColor( frame = cv2.cvtColor(
frame_processor.get_current_frame(camera_name), request.app.detected_frames_processor.get_current_frame(camera_name),
cv2.COLOR_YUV2BGR_I420, cv2.COLOR_YUV2BGR_I420,
) )
@@ -463,8 +460,8 @@ def recording_clip(
text=False, text=False,
) as ffmpeg: ) as ffmpeg:
while True: while True:
data = ffmpeg.stdout.read(8192) data = ffmpeg.stdout.read(1024)
if data is not None and len(data) > 0: if data is not None:
yield data yield data
else: else:
if ffmpeg.returncode and ffmpeg.returncode != 0: if ffmpeg.returncode and ffmpeg.returncode != 0:
@@ -816,15 +813,15 @@ def grid_snapshot(
): ):
if camera_name in request.app.frigate_config.cameras: if camera_name in request.app.frigate_config.cameras:
detect = request.app.frigate_config.cameras[camera_name].detect detect = request.app.frigate_config.cameras[camera_name].detect
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor frame = request.app.detected_frames_processor.get_current_frame(camera_name, {})
frame = frame_processor.get_current_frame(camera_name, {})
retry_interval = float( retry_interval = float(
request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval request.app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
or 10 or 10
) )
if frame is None or datetime.now().timestamp() > ( if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval request.app.detected_frames_processor.get_current_frame_time(camera_name)
+ retry_interval
): ):
return JSONResponse( return JSONResponse(
content={"success": False, "message": "Unable to get valid frame"}, content={"success": False, "message": "Unable to get valid frame"},
@@ -920,7 +917,7 @@ def grid_snapshot(
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response( return Response(
jpg.tobytes(), jpg.tobytes,
media_type="image/jpeg", media_type="image/jpeg",
headers={"Cache-Control": "no-store"}, headers={"Cache-Control": "no-store"},
) )
@@ -1456,6 +1453,7 @@ def preview_thumbnail(file_name: str):
return Response( return Response(
jpg_bytes, jpg_bytes,
# FIXME: Shouldn't it be either jpg or webp depending on the endpoint?
media_type="image/webp", media_type="image/webp",
headers={ headers={
"Content-Type": "image/webp", "Content-Type": "image/webp",
@@ -1484,7 +1482,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response( return Response(
jpg.tobytes(), jpg.tobytes,
media_type="image/jpeg", media_type="image/jpeg",
headers={"Cache-Control": "no-store"}, headers={"Cache-Control": "no-store"},
) )
@@ -1537,6 +1535,6 @@ def label_snapshot(request: Request, camera_name: str, label: str):
_, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
return Response( return Response(
jpg.tobytes(), jpg.tobytes,
media_type="image/jpeg", media_type="image/jpeg",
) )
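Note: several snapshot endpoints in this file fix the same one-character bug: the old code passed `jpg.tobytes` (the bound method object) to `Response` instead of calling it. A quick illustration with OpenCV:

```python
import cv2
import numpy as np

frame = np.zeros((120, 160, 3), dtype=np.uint8)
ok, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
assert ok

body = jpg.tobytes()  # correct: the JPEG payload as bytes
oops = jpg.tobytes    # old bug: the method object itself, not a byte string
print(type(body).__name__, type(oops).__name__)  # bytes builtin_function_or_method
```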

View File

@@ -12,21 +12,13 @@ from fastapi.responses import JSONResponse
from peewee import Case, DoesNotExist, fn, operator from peewee import Case, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.defs.query.review_query_parameters import ( from frigate.api.defs.review_query_parameters import (
ReviewActivityMotionQueryParams, ReviewActivityMotionQueryParams,
ReviewQueryParams, ReviewQueryParams,
ReviewSummaryQueryParams, ReviewSummaryQueryParams,
) )
from frigate.api.defs.request.review_body import ReviewModifyMultipleBody
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.response.review_response import (
ReviewActivityMotionResponse,
ReviewSegmentResponse,
ReviewSummaryResponse,
)
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.models import Recordings, ReviewSegment from frigate.models import Recordings, ReviewSegment
from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -34,7 +26,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.review]) router = APIRouter(tags=[Tags.review])
@router.get("/review", response_model=list[ReviewSegmentResponse]) @router.get("/review")
def review(params: ReviewQueryParams = Depends()): def review(params: ReviewQueryParams = Depends()):
cameras = params.cameras cameras = params.cameras
labels = params.labels labels = params.labels
@@ -110,7 +102,7 @@ def review(params: ReviewQueryParams = Depends()):
return JSONResponse(content=[r for r in review]) return JSONResponse(content=[r for r in review])
@router.get("/review/summary", response_model=ReviewSummaryResponse) @router.get("/review/summary")
def review_summary(params: ReviewSummaryQueryParams = Depends()): def review_summary(params: ReviewSummaryQueryParams = Depends()):
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone) hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp() day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
@@ -162,7 +154,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.alert), (ReviewSegment.severity == "alert"),
ReviewSegment.has_been_reviewed, ReviewSegment.has_been_reviewed,
) )
], ],
@@ -174,7 +166,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.detection), (ReviewSegment.severity == "detection"),
ReviewSegment.has_been_reviewed, ReviewSegment.has_been_reviewed,
) )
], ],
@@ -186,7 +178,19 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.alert), (ReviewSegment.severity == "significant_motion"),
ReviewSegment.has_been_reviewed,
)
],
0,
)
).alias("reviewed_motion"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "alert"),
1, 1,
) )
], ],
@@ -198,13 +202,25 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.detection), (ReviewSegment.severity == "detection"),
1, 1,
) )
], ],
0, 0,
) )
).alias("total_detection"), ).alias("total_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
1,
)
],
0,
)
).alias("total_motion"),
) )
.where(reduce(operator.and_, clauses)) .where(reduce(operator.and_, clauses))
.dicts() .dicts()
@@ -231,7 +247,6 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
label_clause = reduce(operator.or_, label_clauses) label_clause = reduce(operator.or_, label_clauses)
clauses.append((label_clause)) clauses.append((label_clause))
day_in_seconds = 60 * 60 * 24
last_month = ( last_month = (
ReviewSegment.select( ReviewSegment.select(
fn.strftime( fn.strftime(
@@ -248,7 +263,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.alert), (ReviewSegment.severity == "alert"),
ReviewSegment.has_been_reviewed, ReviewSegment.has_been_reviewed,
) )
], ],
@@ -260,7 +275,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.detection), (ReviewSegment.severity == "detection"),
ReviewSegment.has_been_reviewed, ReviewSegment.has_been_reviewed,
) )
], ],
@@ -272,7 +287,19 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.alert), (ReviewSegment.severity == "significant_motion"),
ReviewSegment.has_been_reviewed,
)
],
0,
)
).alias("reviewed_motion"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "alert"),
1, 1,
) )
], ],
@@ -284,17 +311,29 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
None, None,
[ [
( (
(ReviewSegment.severity == SeverityEnum.detection), (ReviewSegment.severity == "detection"),
1, 1,
) )
], ],
0, 0,
) )
).alias("total_detection"), ).alias("total_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == "significant_motion"),
1,
)
],
0,
)
).alias("total_motion"),
) )
.where(reduce(operator.and_, clauses)) .where(reduce(operator.and_, clauses))
.group_by( .group_by(
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds, (ReviewSegment.start_time + seconds_offset).cast("int") / (3600 * 24),
) )
.order_by(ReviewSegment.start_time.desc()) .order_by(ReviewSegment.start_time.desc())
) )
@@ -309,10 +348,19 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
return JSONResponse(content=data) return JSONResponse(content=data)
@router.post("/reviews/viewed", response_model=GenericResponse) @router.post("/reviews/viewed")
def set_multiple_reviewed(body: ReviewModifyMultipleBody): def set_multiple_reviewed(body: dict = None):
json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
context=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
ReviewSegment.update(has_been_reviewed=True).where( ReviewSegment.update(has_been_reviewed=True).where(
ReviewSegment.id << body.ids ReviewSegment.id << list_of_ids
).execute() ).execute()
return JSONResponse( return JSONResponse(
@@ -321,9 +369,17 @@ def set_multiple_reviewed(body: ReviewModifyMultipleBody):
) )
@router.post("/reviews/delete", response_model=GenericResponse) @router.post("/reviews/delete")
def delete_reviews(body: ReviewModifyMultipleBody): def delete_reviews(body: dict = None):
list_of_ids = body.ids json: dict[str, any] = body or {}
list_of_ids = json.get("ids", "")
if not list_of_ids or len(list_of_ids) == 0:
return JSONResponse(
content=({"success": False, "message": "Not a valid list of ids"}),
status_code=404,
)
reviews = ( reviews = (
ReviewSegment.select( ReviewSegment.select(
ReviewSegment.camera, ReviewSegment.camera,
@@ -364,13 +420,11 @@ def delete_reviews(body: ReviewModifyMultipleBody):
ReviewSegment.delete().where(ReviewSegment.id << list_of_ids).execute() ReviewSegment.delete().where(ReviewSegment.id << list_of_ids).execute()
return JSONResponse( return JSONResponse(
content=({"success": True, "message": "Deleted review items."}), status_code=200 content=({"success": True, "message": "Delete reviews"}), status_code=200
) )
@router.get( @router.get("/review/activity/motion")
"/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
)
def motion_activity(params: ReviewActivityMotionQueryParams = Depends()): def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
"""Get motion and audio activity.""" """Get motion and audio activity."""
cameras = params.cameras cameras = params.cameras
@@ -444,44 +498,98 @@ def motion_activity(params: ReviewActivityMotionQueryParams = Depends()):
return JSONResponse(content=normalized) return JSONResponse(content=normalized)
@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse) @router.get("/review/activity/audio")
def audio_activity(params: ReviewActivityMotionQueryParams = Depends()):
"""Get motion and audio activity."""
cameras = params.cameras
before = params.before or datetime.datetime.now().timestamp()
after = (
params.after
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
)
# get scale in seconds
scale = params.scale
clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
all_recordings: list[Recordings] = (
Recordings.select(
Recordings.start_time,
Recordings.duration,
Recordings.objects,
Recordings.dBFS,
)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.iterator()
)
# format is: { start_time: segment_start_ts, audio: [0 - -100] }
# segments where active objects were detected have their audio zeroed out
data: list[dict[str, float]] = []
for rec in all_recordings:
data.append(
{
"start_time": rec.start_time,
"audio": rec.dBFS if rec.objects == 0 else 0,
}
)
# resample data using pandas to get activity on scaled basis
df = pd.DataFrame(data, columns=["start_time", "audio"])
df = df.astype(dtype={"audio": "float16"})
# set date as datetime index
df["start_time"] = pd.to_datetime(df["start_time"], unit="s")
df.set_index(["start_time"], inplace=True)
# normalize data
df = df.resample(f"{scale}S").mean().fillna(0.0)
df["audio"] = (
(df["audio"] - df["audio"].max())
/ (df["audio"].min() - df["audio"].max())
* -100
)
# change types for output
df.index = df.index.astype(int) // (10**9)
normalized = df.reset_index().to_dict("records")
return JSONResponse(content=normalized)
@router.get("/review/event/{event_id}")
def get_review_from_event(event_id: str): def get_review_from_event(event_id: str):
try: try:
return JSONResponse( return model_to_dict(
model_to_dict( ReviewSegment.get(
ReviewSegment.get( ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
ReviewSegment.data["detections"].cast("text") % f'*"{event_id}"*'
)
) )
) )
except DoesNotExist: except DoesNotExist:
return JSONResponse( return "Review item not found", 404
content={"success": False, "message": "Review item not found"},
status_code=404,
)
@router.get("/review/{review_id}", response_model=ReviewSegmentResponse) @router.get("/review/{event_id}")
def get_review(review_id: str): def get_review(event_id: str):
try: try:
return JSONResponse( return model_to_dict(ReviewSegment.get(ReviewSegment.id == event_id))
content=model_to_dict(ReviewSegment.get(ReviewSegment.id == review_id))
)
except DoesNotExist: except DoesNotExist:
return JSONResponse( return "Review item not found", 404
content={"success": False, "message": "Review item not found"},
status_code=404,
)
@router.delete("/review/{review_id}/viewed", response_model=GenericResponse) @router.delete("/review/{event_id}/viewed")
def set_not_reviewed(review_id: str): def set_not_reviewed(event_id: str):
try: try:
review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == review_id) review: ReviewSegment = ReviewSegment.get(ReviewSegment.id == event_id)
except DoesNotExist: except DoesNotExist:
return JSONResponse( return JSONResponse(
content=( content=(
{"success": False, "message": "Review " + review_id + " not found"} {"success": False, "message": "Review " + event_id + " not found"}
), ),
status_code=404, status_code=404,
) )
@@ -490,8 +598,6 @@ def set_not_reviewed(review_id: str):
review.save() review.save()
return JSONResponse( return JSONResponse(
content=( content=({"success": True, "message": "Reviewed " + event_id + " not viewed"}),
{"success": True, "message": "Set Review " + review_id + " as not viewed"}
),
status_code=200, status_code=200,
) )
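Note: the removed `/review/activity/audio` endpoint above normalizes raw dBFS readings onto a 0..-100 scale via pandas resampling. A toy reproduction of that transform (sample values invented):

```python
import pandas as pd

data = [
    {"start_time": 0, "audio": -60.0},
    {"start_time": 10, "audio": -30.0},
    {"start_time": 40, "audio": -10.0},
]
df = pd.DataFrame(data, columns=["start_time", "audio"]).astype({"audio": "float16"})
df["start_time"] = pd.to_datetime(df["start_time"], unit="s")
df = df.set_index("start_time").resample("30s").mean().fillna(0.0)
# Linear rescale: loudest bucket -> 0, quietest -> -100, matching the endpoint.
df["audio"] = (df["audio"] - df["audio"].max()) / (df["audio"].min() - df["audio"].max()) * -100
df.index = df.index.astype("int64") // 10**9  # back to epoch seconds
print(df.reset_index().to_dict("records"))
# [{'start_time': 0, 'audio': -100.0}, {'start_time': 30, 'audio': 0.0}]
```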

View File

@@ -36,7 +36,6 @@ from frigate.const import (
EXPORT_DIR, EXPORT_DIR,
MODEL_CACHE_DIR, MODEL_CACHE_DIR,
RECORD_DIR, RECORD_DIR,
SHM_FRAMES_VAR,
) )
from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings from frigate.embeddings import EmbeddingsContext, manage_embeddings
@@ -69,7 +68,6 @@ from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor from frigate.timeline import TimelineProcessor
from frigate.util.builtin import empty_and_close_queue from frigate.util.builtin import empty_and_close_queue
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.object import get_camera_regions_grid from frigate.util.object import get_camera_regions_grid
from frigate.version import VERSION from frigate.version import VERSION
from frigate.video import capture_camera, track_camera from frigate.video import capture_camera, track_camera
@@ -92,7 +90,6 @@ class FrigateApp:
self.processes: dict[str, int] = {} self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None self.embeddings: Optional[EmbeddingsContext] = None
self.region_grids: dict[str, list[list[dict[str, int]]]] = {} self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
self.frame_manager = SharedMemoryFrameManager()
self.config = config self.config = config
def ensure_dirs(self) -> None: def ensure_dirs(self) -> None:
@@ -328,20 +325,20 @@ class FrigateApp:
for det in self.config.detectors.values() for det in self.config.detectors.values()
] ]
) )
shm_in = UntrackedSharedMemory( shm_in = mp.shared_memory.SharedMemory(
name=name, name=name,
create=True, create=True,
size=largest_frame, size=largest_frame,
) )
except FileExistsError: except FileExistsError:
shm_in = UntrackedSharedMemory(name=name) shm_in = mp.shared_memory.SharedMemory(name=name)
try: try:
shm_out = UntrackedSharedMemory( shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4 name=f"out-{name}", create=True, size=20 * 6 * 4
) )
except FileExistsError: except FileExistsError:
shm_out = UntrackedSharedMemory(name=f"out-{name}") shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in) self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out) self.detection_shms.append(shm_out)
@@ -434,11 +431,6 @@ class FrigateApp:
logger.info(f"Capture process not started for disabled camera {name}") logger.info(f"Capture process not started for disabled camera {name}")
continue continue
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
capture_process = util.Process( capture_process = util.Process(
target=capture_camera, target=capture_camera,
name=f"camera_capture:{name}", name=f"camera_capture:{name}",
@@ -521,21 +513,15 @@ class FrigateApp:
1, 1,
) )
if cam_total_frame_size == 0.0: shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))
return 0
shm_frame_count = min(
int(os.environ.get(SHM_FRAMES_VAR, "50")),
int(available_shm / (cam_total_frame_size)),
)
logger.debug( logger.debug(
f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM" f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
) )
if shm_frame_count < 20: if shm_frame_count < 10:
logger.warning( logger.warning(
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB." f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 10)}MB."
) )
return shm_frame_count return shm_frame_count
@@ -721,7 +707,6 @@ class FrigateApp:
self.event_metadata_updater.stop() self.event_metadata_updater.stop()
self.inter_zmq_proxy.stop() self.inter_zmq_proxy.stop()
self.frame_manager.cleanup()
while len(self.detection_shms) > 0: while len(self.detection_shms) > 0:
shm = self.detection_shms.pop() shm = self.detection_shms.pop()
shm.close() shm.close()
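Note: the `UntrackedSharedMemory` wrapper is replaced here with stdlib `multiprocessing.shared_memory` plus a create-or-attach pattern. The core idiom, runnable standalone (segment name and size are arbitrary):

```python
from multiprocessing import shared_memory

name = "frigate_demo_segment"  # arbitrary example name
try:
    shm = shared_memory.SharedMemory(name=name, create=True, size=320 * 320 * 3)
except FileExistsError:
    # A previous run (or another process) already created the segment;
    # attach to the existing one instead of failing.
    shm = shared_memory.SharedMemory(name=name)

shm.buf[0] = 255  # every process attached under this name sees the write
shm.close()       # detach this handle...
shm.unlink()      # ...and remove the segment once nobody needs it
```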

View File

@@ -22,7 +22,7 @@ from frigate.const import (
) )
from frigate.models import Event, Previews, Recordings, ReviewSegment from frigate.models import Event, Previews, Recordings, ReviewSegment
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import ModelStatusTypesEnum, TrackedObjectUpdateTypesEnum from frigate.types import ModelStatusTypesEnum
from frigate.util.object import get_camera_regions_grid from frigate.util.object import get_camera_regions_grid
from frigate.util.services import restart_frigate from frigate.util.services import restart_frigate
@@ -137,14 +137,8 @@ class Dispatcher:
event.data["description"] = payload["description"] event.data["description"] = payload["description"]
event.save() event.save()
self.publish( self.publish(
"tracked_object_update", "event_update",
json.dumps( json.dumps({"id": event.id, "description": event.data["description"]}),
{
"type": TrackedObjectUpdateTypesEnum.description,
"id": event.id,
"description": event.data["description"],
}
),
) )
def handle_update_model_state(): def handle_update_model_state():
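Note: this hunk is a breaking change for MQTT consumers: description updates move from an `event_update` topic/payload to `tracked_object_update` with an explicit `type` discriminator. The new payload shape, roughly (event id and text invented):

```python
import json

payload = json.dumps(
    {
        "type": "description",  # TrackedObjectUpdateTypesEnum.description
        "id": "1729500000.123456-aaaaaa",  # hypothetical event id
        "description": "A person walks up the driveway carrying a package.",
    }
)
print(payload)
```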

View File

@@ -14,7 +14,7 @@ class EventUpdatePublisher(Publisher):
super().__init__("update") super().__init__("update")
def publish( def publish(
self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, any]] self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, any]]
) -> None: ) -> None:
super().publish(payload) super().publish(payload)

View File

@@ -17,7 +17,7 @@ class MqttClient(Communicator): # type: ignore[misc]
def __init__(self, config: FrigateConfig) -> None: def __init__(self, config: FrigateConfig) -> None:
self.config = config self.config = config
self.mqtt_config = config.mqtt self.mqtt_config = config.mqtt
self.connected = False self.connected: bool = False
def subscribe(self, receiver: Callable) -> None: def subscribe(self, receiver: Callable) -> None:
"""Wrapper for allowing dispatcher to subscribe.""" """Wrapper for allowing dispatcher to subscribe."""
@@ -27,7 +27,7 @@ class MqttClient(Communicator): # type: ignore[misc]
def publish(self, topic: str, payload: Any, retain: bool = False) -> None: def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
"""Wrapper for publishing when client is in valid state.""" """Wrapper for publishing when client is in valid state."""
if not self.connected: if not self.connected:
logger.debug(f"Unable to publish to {topic}: client is not connected") logger.error(f"Unable to publish to {topic}: client is not connected")
return return
self.client.publish( self.client.publish(
@@ -133,7 +133,7 @@ class MqttClient(Communicator): # type: ignore[misc]
"""Mqtt connection callback.""" """Mqtt connection callback."""
threading.current_thread().name = "mqtt" threading.current_thread().name = "mqtt"
if reason_code != 0: if reason_code != 0:
if reason_code == "Server unavailable": if reason_code == "Server Unavailable":
logger.error( logger.error(
"Unable to connect to MQTT server: MQTT Server unavailable" "Unable to connect to MQTT server: MQTT Server unavailable"
) )
@@ -173,7 +173,6 @@ class MqttClient(Communicator): # type: ignore[misc]
client_id=self.mqtt_config.client_id, client_id=self.mqtt_config.client_id,
) )
self.client.on_connect = self._on_connect self.client.on_connect = self._on_connect
self.client.on_disconnect = self._on_disconnect
self.client.will_set( self.client.will_set(
self.mqtt_config.topic_prefix + "/available", self.mqtt_config.topic_prefix + "/available",
payload="offline", payload="offline",
@@ -198,6 +197,14 @@ class MqttClient(Communicator): # type: ignore[misc]
for name in self.config.cameras.keys(): for name in self.config.cameras.keys():
for callback in callback_types: for callback in callback_types:
# We need to pre-clear existing set topics because in previous
# versions the webUI published retained messages on the /set topic but this is
# no longer the case.
self.client.publish(
f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
None,
retain=True,
)
self.client.message_callback_add( self.client.message_callback_add(
f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set", f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
self.on_mqtt_command, self.on_mqtt_command,
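Note: pre-clearing the `/set` topics relies on MQTT's rule that publishing a zero-length retained payload deletes the retained message. With paho-mqtt (broker address and topic are illustrative; constructor shown in the 1.x style):

```python
import paho.mqtt.client as mqtt

client = mqtt.Client(client_id="frigate-demo")  # paho-mqtt 1.x style constructor
client.connect("mqtt.local", 1883)              # illustrative broker

# An empty retained publish clears whatever an old client left retained
# on the command topic.
client.publish("frigate/front_door/detect/set", None, retain=True)
client.disconnect()
```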

View File

@@ -13,7 +13,7 @@ class AuthConfig(FrigateBaseModel):
default=False, title="Reset the admin password on startup" default=False, title="Reset the admin password on startup"
) )
cookie_name: str = Field( cookie_name: str = Field(
default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$" default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z]_*$"
) )
cookie_secure: bool = Field(default=False, title="Set secure flag on cookie") cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
session_length: int = Field( session_length: int = Field(
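Note: the old cookie-name pattern `^[a-z]_*$` only allowed a single letter followed by underscores, which even the default `frigate_token` fails; the new `^[a-z_]+$` accepts any mix of lowercase letters and underscores. Quick check:

```python
import re

old = re.compile(r"^[a-z]_*$")  # one lowercase letter, then only underscores
new = re.compile(r"^[a-z_]+$")  # one or more lowercase letters/underscores

print(bool(old.match("frigate_token")))  # False — the default name is rejected
print(bool(new.match("frigate_token")))  # True
```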

View File

@@ -38,10 +38,6 @@ class GenAICameraConfig(BaseModel):
default_factory=list, default_factory=list,
title="List of required zones to be entered in order to run generative AI.", title="List of required zones to be entered in order to run generative AI.",
) )
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
)
@field_validator("required_zones", mode="before") @field_validator("required_zones", mode="before")
@classmethod @classmethod

View File

@@ -74,7 +74,6 @@ class OnvifConfig(FrigateBaseModel):
port: int = Field(default=8000, title="Onvif Port") port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username") user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password") password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
autotracking: PtzAutotrackConfig = Field( autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig, default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.", title="PTZ auto tracking config.",

View File

@@ -4,7 +4,6 @@ from typing import Optional
from pydantic import Field from pydantic import Field
from frigate.const import MAX_PRE_CAPTURE from frigate.const import MAX_PRE_CAPTURE
from frigate.review.types import SeverityEnum
from ..base import FrigateBaseModel from ..base import FrigateBaseModel
@@ -95,22 +94,3 @@ class RecordConfig(FrigateBaseModel):
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of recording." default=None, title="Keep track of original state of recording."
) )
@property
def event_pre_capture(self) -> int:
return max(
self.alerts.pre_capture,
self.detections.pre_capture,
)
def get_review_pre_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.pre_capture
else:
return self.detections.pre_capture
def get_review_post_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.post_capture
else:
return self.detections.post_capture

View File

@@ -29,7 +29,6 @@ from frigate.util.builtin import (
) )
from frigate.util.config import ( from frigate.util.config import (
StreamInfoRetriever, StreamInfoRetriever,
find_config_file,
get_relative_coordinates, get_relative_coordinates,
migrate_frigate_config, migrate_frigate_config,
) )
@@ -68,6 +67,7 @@ logger = logging.getLogger(__name__)
yaml = YAML() yaml = YAML()
DEFAULT_CONFIG_FILES = ["/config/config.yaml", "/config/config.yml"]
DEFAULT_CONFIG = """ DEFAULT_CONFIG = """
mqtt: mqtt:
enabled: False enabled: False
@@ -230,16 +230,12 @@ def verify_recording_segments_setup_with_reasonable_time(
     try:
         seg_arg_index = record_args.index("-segment_time")
     except ValueError:
-        raise ValueError(
-            f"Camera {camera_config.name} has no segment_time in \
-                recording output args, segment args are required for record."
-        )
+        raise ValueError(f"Camera {camera_config.name} has no segment_time in \
+            recording output args, segment args are required for record.")
 
     if int(record_args[seg_arg_index + 1]) > 60:
-        raise ValueError(
-            f"Camera {camera_config.name} has invalid segment_time output arg, \
-                segment_time must be 60 or less."
-        )
+        raise ValueError(f"Camera {camera_config.name} has invalid segment_time output arg, \
+            segment_time must be 60 or less.")
 
 
 def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
@@ -594,27 +590,35 @@ class FrigateConfig(FrigateBaseModel):
                 if isinstance(detector, dict)
                 else detector.model_dump(warnings="none")
             )
-            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
-
-            # users should not set model themselves
-            if detector_config.model:
-                detector_config.model = None
-
-            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-
-            if detector_config.model_path:
-                model_config["path"] = detector_config.model_path
-
-            if "path" not in model_config:
-                if detector_config.type == "cpu":
-                    model_config["path"] = "/cpu_model.tflite"
-                elif detector_config.type == "edgetpu":
-                    model_config["path"] = "/edgetpu_model.tflite"
-
-            model = ModelConfig.model_validate(model_config)
-            model.check_and_load_plus_model(self.plus_api, detector_config.type)
-            model.compute_model_hash()
-            detector_config.model = model
+            detector_config: DetectorConfig = adapter.validate_python(model_dict)
+
+            if detector_config.model is None:
+                detector_config.model = self.model.model_copy()
+            else:
+                path = detector_config.model.path
+                detector_config.model = self.model.model_copy()
+                detector_config.model.path = path
+
+                if "path" not in model_dict or len(model_dict.keys()) > 1:
+                    logger.warning(
+                        "Customizing more than a detector model path is unsupported."
+                    )
+
+            merged_model = deep_merge(
+                detector_config.model.model_dump(exclude_unset=True, warnings="none"),
+                self.model.model_dump(exclude_unset=True, warnings="none"),
+            )
+
+            if "path" not in merged_model:
+                if detector_config.type == "cpu":
+                    merged_model["path"] = "/cpu_model.tflite"
+                elif detector_config.type == "edgetpu":
+                    merged_model["path"] = "/edgetpu_model.tflite"
+
+            detector_config.model = ModelConfig.model_validate(merged_model)
+            detector_config.model.check_and_load_plus_model(
+                self.plus_api, detector_config.type
+            )
+            detector_config.model.compute_model_hash()
             self.detectors[key] = detector_config
 
         return self
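
Note: the `+` side leans on `deep_merge` to overlay detector-specific model keys onto the global model config, with detector keys winning. A minimal sketch of that merge behavior (assumed semantics; the real helper lives in `frigate.util.builtin`):

    def deep_merge(overrides: dict, defaults: dict) -> dict:
        """Recursively merge two dicts; keys from `overrides` win on conflict."""
        merged = dict(defaults)
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = deep_merge(value, merged[key])
            else:
                merged[key] = value
        return merged

    # Detector-level keys override the global model config:
    assert deep_merge(
        {"path": "/custom.tflite"}, {"path": "/cpu_model.tflite", "width": 320}
    ) == {"path": "/custom.tflite", "width": 320}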
@@ -630,20 +634,27 @@ class FrigateConfig(FrigateBaseModel):
     @classmethod
     def load(cls, **kwargs):
-        config_path = find_config_file()
+        config_path = os.environ.get("CONFIG_FILE")
+
+        # No explicit configuration file, try to find one in the default paths.
+        if config_path is None:
+            for path in DEFAULT_CONFIG_FILES:
+                if os.path.isfile(path):
+                    config_path = path
+                    break
 
         # No configuration file found, create one.
         new_config = False
-        if not os.path.isfile(config_path):
+        if config_path is None:
             logger.info("No config file found, saving default config")
-            config_path = config_path
+            config_path = DEFAULT_CONFIG_FILES[-1]
             new_config = True
         else:
             # Check if the config file needs to be migrated.
             migrate_frigate_config(config_path)
 
         # Finally, load the resulting configuration file.
-        with open(config_path, "a+" if new_config else "r") as f:
+        with open(config_path, "a+") as f:
             # Only write the default config if the opened file is non-empty. This can happen as
             # a race condition. It's extremely unlikely, but eh. Might as well check it.
             if new_config and f.tell() == 0:
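
Note: the `-` side delegates config discovery to `find_config_file()`, while the `+` side inlines the search over `DEFAULT_CONFIG_FILES`. The inlined logic, isolated (helper name hypothetical):

    import os

    DEFAULT_CONFIG_FILES = ["/config/config.yaml", "/config/config.yml"]

    def resolve_config_path() -> str | None:
        # An explicit CONFIG_FILE env var wins; otherwise probe the defaults in order.
        config_path = os.environ.get("CONFIG_FILE")
        if config_path is not None:
            return config_path
        for path in DEFAULT_CONFIG_FILES:
            if os.path.isfile(path):
                return path
        # Caller creates DEFAULT_CONFIG_FILES[-1] and writes a default config.
        return None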

View File

@@ -23,7 +23,7 @@ EnvString = Annotated[str, AfterValidator(validate_env_string)]
 def validate_env_vars(v: dict[str, str], info: ValidationInfo) -> dict[str, str]:
     if isinstance(info.context, dict) and info.context.get("install", False):
-        for k, v in v.items():
+        for k, v in v:
             os.environ[k] = v
 
     return v
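
Note: the `.items()` difference is a real behavior change, not style. Iterating a dict directly yields only its keys, so the two-name unpack in `for k, v in v:` runs against each key string. For example:

    env = {"FRIGATE_MQTT_PASSWORD": "secret"}

    # With .items(): yields ("FRIGATE_MQTT_PASSWORD", "secret") as intended.
    for k, v in env.items():
        assert (k, v) == ("FRIGATE_MQTT_PASSWORD", "secret")

    # Without .items(): iteration yields the key string, and unpacking a
    # 21-character string into two names raises ValueError.
    try:
        for k, v in env:
            pass
    except ValueError as e:
        print(e)  # too many values to unpack (expected 2)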

View File

@@ -13,8 +13,6 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
-SHM_FRAMES_VAR = "SHM_MAX_FRAMES"
 
 # Attribute & Object constants
 
 DEFAULT_ATTRIBUTE_LABEL_MAP = {

View File

@@ -27,11 +27,6 @@ class InputTensorEnum(str, Enum):
nhwc = "nhwc" nhwc = "nhwc"
class InputDTypeEnum(str, Enum):
float = "float"
int = "int"
class ModelTypeEnum(str, Enum): class ModelTypeEnum(str, Enum):
ssd = "ssd" ssd = "ssd"
yolox = "yolox" yolox = "yolox"
@@ -58,9 +53,6 @@ class ModelConfig(BaseModel):
     input_pixel_format: PixelFormatEnum = Field(
         default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
     )
-    input_dtype: InputDTypeEnum = Field(
-        default=InputDTypeEnum.int, title="Model Input D Type"
-    )
     model_type: ModelTypeEnum = Field(
         default=ModelTypeEnum.ssd, title="Object Detection Model Type"
     )
@@ -194,9 +186,6 @@ class BaseDetectorConfig(BaseModel):
     model: Optional[ModelConfig] = Field(
         default=None, title="Detector specific model configuration."
     )
-    model_path: Optional[str] = Field(
-        default=None, title="Detector specific model path."
-    )
     model_config = ConfigDict(
         extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
     )

View File

@@ -32,7 +32,6 @@ class DeepStack(DetectionApi):
         self.api_timeout = detector_config.api_timeout
         self.api_key = detector_config.api_key
         self.labels = detector_config.model.merged_labelmap
-        self.session = requests.Session()
 
     def get_label_index(self, label_value):
         if label_value.lower() == "truck":
@@ -52,7 +51,7 @@ class DeepStack(DetectionApi):
data = {"api_key": self.api_key} data = {"api_key": self.api_key}
try: try:
response = self.session.post( response = requests.post(
self.api_url, self.api_url,
data=data, data=data,
files={"image": image_bytes}, files={"image": image_bytes},

View File

@@ -54,7 +54,7 @@ class ONNXDetector(DetectionApi):
logger.info(f"ONNX: {path} loaded") logger.info(f"ONNX: {path} loaded")
def detect_raw(self, tensor_input: np.ndarray): def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name model_input_name = self.model.get_inputs()[0].name
tensor_output = self.model.run(None, {model_input_name: tensor_input}) tensor_output = self.model.run(None, {model_input_name: tensor_input})

View File

@@ -136,17 +136,17 @@ class Rknn(DetectionApi):
     def check_config(self, config):
         if (config.model.width != 320) or (config.model.height != 320):
             raise Exception(
-                "Make sure to set the model width and height to 320 in your config."
+                "Make sure to set the model width and height to 320 in your config.yml."
             )
 
         if config.model.input_pixel_format != "bgr":
             raise Exception(
-                'Make sure to set the model input_pixel_format to "bgr" in your config.'
+                'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
             )
 
         if config.model.input_tensor != "nhwc":
             raise Exception(
-                'Make sure to set the model input_tensor to "nhwc" in your config.'
+                'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
             )
 
     def detect_raw(self, tensor_input):

View File

@@ -98,7 +98,9 @@ class ROCmDetector(DetectionApi):
         else:
             logger.info(f"AMD/ROCm: loading model from {path}")
 
-        if (
+        if path.endswith(".onnx"):
+            self.model = migraphx.parse_onnx(path)
+        elif (
             path.endswith(".tf")
             or path.endswith(".tf2")
             or path.endswith(".tflite")
@@ -106,7 +108,7 @@ class ROCmDetector(DetectionApi):
             # untested
             self.model = migraphx.parse_tf(path)
         else:
-            self.model = migraphx.parse_onnx(path)
+            raise Exception(f"AMD/ROCm: unknown model format {path}")
 
         logger.info("AMD/ROCm: compiling the model")

View File

@@ -1,11 +1,13 @@
"""SQLite-vec embeddings database.""" """SQLite-vec embeddings database."""
import base64 import base64
import io
import logging import logging
import os import os
import time import time
from numpy import ndarray from numpy import ndarray
from PIL import Image
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.inter_process import InterProcessRequestor
@@ -20,7 +22,7 @@ from frigate.models import Event
 from frigate.types import ModelStatusTypesEnum
 from frigate.util.builtin import serialize
 
-from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
+from .functions.onnx import GenericONNXEmbedding
 
 logger = logging.getLogger(__name__)
@@ -95,7 +97,7 @@ class Embeddings:
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx", "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
}, },
model_size=config.model_size, model_size=config.model_size,
model_type=ModelTypeEnum.text, model_type="text",
requestor=self.requestor, requestor=self.requestor,
device="CPU", device="CPU",
) )
@@ -116,102 +118,83 @@ class Embeddings:
             model_file=model_file,
             download_urls=download_urls,
             model_size=config.model_size,
-            model_type=ModelTypeEnum.vision,
+            model_type="vision",
             requestor=self.requestor,
             device="GPU" if config.model_size == "large" else "CPU",
         )
 
-    def embed_thumbnail(
-        self, event_id: str, thumbnail: bytes, upsert: bool = True
-    ) -> ndarray:
-        """Embed thumbnail and optionally insert into DB.
-
-        @param: event_id in Events DB
-        @param: thumbnail bytes in jpg format
-        @param: upsert If embedding should be upserted into vec DB
-        """
+    def upsert_thumbnail(self, event_id: str, thumbnail: bytes) -> ndarray:
         # Convert thumbnail bytes to PIL Image
-        embedding = self.vision_embedding([thumbnail])[0]
-
-        if upsert:
-            self.db.execute_sql(
-                """
-                INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
-                VALUES(?, ?)
-                """,
-                (event_id, serialize(embedding)),
-            )
+        image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
+        embedding = self.vision_embedding([image])[0]
+
+        self.db.execute_sql(
+            """
+            INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
+            VALUES(?, ?)
+            """,
+            (event_id, serialize(embedding)),
+        )
 
         return embedding
 
-    def batch_embed_thumbnail(
-        self, event_thumbs: dict[str, bytes], upsert: bool = True
-    ) -> list[ndarray]:
-        """Embed thumbnails and optionally insert into DB.
-
-        @param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
-        @param: upsert If embedding should be upserted into vec DB
-        """
+    def batch_upsert_thumbnail(self, event_thumbs: dict[str, bytes]) -> list[ndarray]:
+        images = [
+            Image.open(io.BytesIO(thumb)).convert("RGB")
+            for thumb in event_thumbs.values()
+        ]
         ids = list(event_thumbs.keys())
-        embeddings = self.vision_embedding(list(event_thumbs.values()))
-
-        if upsert:
-            items = []
-
-            for i in range(len(ids)):
-                items.append(ids[i])
-                items.append(serialize(embeddings[i]))
-
-            self.db.execute_sql(
-                """
-                INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
-                VALUES {}
-                """.format(", ".join(["(?, ?)"] * len(ids))),
-                items,
-            )
+        embeddings = self.vision_embedding(images)
+
+        items = []
+
+        for i in range(len(ids)):
+            items.append(ids[i])
+            items.append(serialize(embeddings[i]))
+
+        self.db.execute_sql(
+            """
+            INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
+            VALUES {}
+            """.format(", ".join(["(?, ?)"] * len(ids))),
+            items,
+        )
 
         return embeddings
 
-    def embed_description(
-        self, event_id: str, description: str, upsert: bool = True
-    ) -> ndarray:
+    def upsert_description(self, event_id: str, description: str) -> ndarray:
         embedding = self.text_embedding([description])[0]
-
-        if upsert:
-            self.db.execute_sql(
-                """
-                INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
-                VALUES(?, ?)
-                """,
-                (event_id, serialize(embedding)),
-            )
+        self.db.execute_sql(
+            """
+            INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
+            VALUES(?, ?)
+            """,
+            (event_id, serialize(embedding)),
+        )
 
         return embedding
 
-    def batch_embed_description(
-        self, event_descriptions: dict[str, str], upsert: bool = True
-    ) -> ndarray:
+    def batch_upsert_description(self, event_descriptions: dict[str, str]) -> ndarray:
         # upsert embeddings one by one to avoid token limit
         embeddings = []
         for desc in event_descriptions.values():
             embeddings.append(self.text_embedding([desc])[0])
 
-        if upsert:
-            ids = list(event_descriptions.keys())
-            items = []
-
-            for i in range(len(ids)):
-                items.append(ids[i])
-                items.append(serialize(embeddings[i]))
-
-            self.db.execute_sql(
-                """
-                INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
-                VALUES {}
-                """.format(", ".join(["(?, ?)"] * len(ids))),
-                items,
-            )
+        ids = list(event_descriptions.keys())
+
+        items = []
+
+        for i in range(len(ids)):
+            items.append(ids[i])
+            items.append(serialize(embeddings[i]))
+
+        self.db.execute_sql(
+            """
+            INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
+            VALUES {}
+            """.format(", ".join(["(?, ?)"] * len(ids))),
+            items,
+        )
 
         return embeddings
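
Note: both versions build the multi-row upsert the same way: one `(?, ?)` placeholder pair per event, with ids and serialized embeddings interleaved in a flat parameter list. The construction on its own (plain sqlite3 for illustration):

    import sqlite3

    rows = {"event_a": b"emb_a", "event_b": b"emb_b"}  # id -> serialized embedding

    placeholders = ", ".join(["(?, ?)"] * len(rows))  # "(?, ?), (?, ?)"
    params: list = []
    for event_id, embedding in rows.items():
        params.extend([event_id, embedding])

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE vec_thumbnails(id TEXT PRIMARY KEY, thumbnail_embedding BLOB)")
    db.execute(
        f"INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding) VALUES {placeholders}",
        params,
    )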
@@ -278,10 +261,10 @@ class Embeddings:
totals["processed_objects"] += 1 totals["processed_objects"] += 1
# run batch embedding # run batch embedding
self.batch_embed_thumbnail(batch_thumbs) self.batch_upsert_thumbnail(batch_thumbs)
if batch_descs: if batch_descs:
self.batch_embed_description(batch_descs) self.batch_upsert_description(batch_descs)
# report progress every batch so we don't spam the logs # report progress every batch so we don't spam the logs
progress = (totals["processed_objects"] / total_events) * 100 progress = (totals["processed_objects"] / total_events) * 100

View File

@@ -1,7 +1,6 @@
 import logging
 import os
 import warnings
-from enum import Enum
 from io import BytesIO
 from typing import Dict, List, Optional, Union
@@ -32,12 +31,6 @@ disable_progress_bar()
 logger = logging.getLogger(__name__)
 
 
-class ModelTypeEnum(str, Enum):
-    face = "face"
-    vision = "vision"
-    text = "text"
-
-
 class GenericONNXEmbedding:
     """Generic embedding function for ONNX models (text and vision)."""
@@ -95,10 +88,7 @@ class GenericONNXEmbedding:
             file_name = os.path.basename(path)
             if file_name in self.download_urls:
                 ModelDownloader.download_from_url(self.download_urls[file_name], path)
-            elif (
-                file_name == self.tokenizer_file
-                and self.model_type == ModelTypeEnum.text
-            ):
+            elif file_name == self.tokenizer_file and self.model_type == "text":
                 if not os.path.exists(path + "/" + self.model_name):
                     logger.info(f"Downloading {self.model_name} tokenizer")
                     tokenizer = AutoTokenizer.from_pretrained(
@@ -129,7 +119,7 @@ class GenericONNXEmbedding:
         if self.runner is None:
             if self.downloader:
                 self.downloader.wait_for_download()
-            if self.model_type == ModelTypeEnum.text:
+            if self.model_type == "text":
                 self.tokenizer = self._load_tokenizer()
             else:
                 self.feature_extractor = self._load_feature_extractor()
@@ -153,35 +143,11 @@ class GenericONNXEmbedding:
f"{MODEL_CACHE_DIR}/{self.model_name}", f"{MODEL_CACHE_DIR}/{self.model_name}",
) )
def _preprocess_inputs(self, raw_inputs: any) -> any:
if self.model_type == ModelTypeEnum.text:
max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
return [
self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=max_length,
return_tensors="np",
)
for text in raw_inputs
]
elif self.model_type == ModelTypeEnum.vision:
processed_images = [self._process_image(img) for img in raw_inputs]
return [
self.feature_extractor(images=image, return_tensors="np")
for image in processed_images
]
else:
raise ValueError(f"Unable to preprocess inputs for {self.model_type}")
def _process_image(self, image): def _process_image(self, image):
if isinstance(image, str): if isinstance(image, str):
if image.startswith("http"): if image.startswith("http"):
response = requests.get(image) response = requests.get(image)
image = Image.open(BytesIO(response.content)).convert("RGB") image = Image.open(BytesIO(response.content)).convert("RGB")
elif isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert("RGB")
return image return image
@@ -197,7 +163,25 @@ class GenericONNXEmbedding:
             )
             return []
 
-        processed_inputs = self._preprocess_inputs(inputs)
+        if self.model_type == "text":
+            max_length = max(len(self.tokenizer.encode(text)) for text in inputs)
+            processed_inputs = [
+                self.tokenizer(
+                    text,
+                    padding="max_length",
+                    truncation=True,
+                    max_length=max_length,
+                    return_tensors="np",
+                )
+                for text in inputs
+            ]
+        else:
+            processed_images = [self._process_image(img) for img in inputs]
+            processed_inputs = [
+                self.feature_extractor(images=image, return_tensors="np")
+                for image in processed_images
+            ]
+
         input_names = self.runner.get_input_names()
         onnx_inputs = {name: [] for name in input_names}
         input: dict[str, any]
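
Note: in both versions the text branch pads every string in the batch to the longest tokenized length, so the per-text numpy arrays share one shape and can be stacked into a single ONNX input. The padding step in isolation (tokenizer model name illustrative):

    from transformers import AutoTokenizer

    texts = ["a person walking", "a red car parked in the driveway"]
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative

    # Pad to the longest encoding in this batch, not a fixed global length.
    max_length = max(len(tokenizer.encode(text)) for text in texts)
    batch = [
        tokenizer(
            text,
            padding="max_length",
            truncation=True,
            max_length=max_length,
            return_tensors="np",
        )
        for text in texts
    ]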

View File

@@ -5,7 +5,6 @@ import logging
 import os
 import threading
 from multiprocessing.synchronize import Event as MpEvent
-from pathlib import Path
 from typing import Optional
 
 import cv2
@@ -25,7 +24,6 @@ from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
 from frigate.events.types import EventTypeEnum
 from frigate.genai import get_genai_client
 from frigate.models import Event
-from frigate.types import TrackedObjectUpdateTypesEnum
 from frigate.util.builtin import serialize
 from frigate.util.image import SharedMemoryFrameManager, calculate_region
@@ -64,7 +62,7 @@ class EmbeddingMaintainer(threading.Thread):
         self.requestor = InterProcessRequestor()
         self.stop_event = stop_event
         self.tracked_events = {}
-        self.genai_client = get_genai_client(config)
+        self.genai_client = get_genai_client(config.genai)
 
     def run(self) -> None:
         """Maintain a SQLite-vec database for semantic search."""
@@ -88,7 +86,7 @@ class EmbeddingMaintainer(threading.Thread):
         try:
             if topic == EmbeddingsRequestEnum.embed_description.value:
                 return serialize(
-                    self.embeddings.embed_description(
+                    self.embeddings.upsert_description(
                         data["id"], data["description"]
                     ),
                     pack=False,
@@ -96,7 +94,7 @@ class EmbeddingMaintainer(threading.Thread):
             elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
                 thumbnail = base64.b64decode(data["thumbnail"])
                 return serialize(
-                    self.embeddings.embed_thumbnail(data["id"], thumbnail),
+                    self.embeddings.upsert_thumbnail(data["id"], thumbnail),
                     pack=False,
                 )
             elif topic == EmbeddingsRequestEnum.generate_search.value:
@@ -115,7 +113,7 @@ class EmbeddingMaintainer(threading.Thread):
         if update is None:
             return
 
-        source_type, _, camera, frame_name, data = update
+        source_type, _, camera, data = update
 
         if not camera or source_type != EventTypeEnum.tracked_object:
             return
@@ -135,9 +133,8 @@ class EmbeddingMaintainer(threading.Thread):
         # Create our own thumbnail based on the bounding box and the frame time
         try:
-            yuv_frame = self.frame_manager.get(
-                frame_name, camera_config.frame_shape_yuv
-            )
+            frame_id = f"{camera}{data['frame_time']}"
+            yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
 
             if yuv_frame is not None:
                 data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
@@ -149,7 +146,7 @@ class EmbeddingMaintainer(threading.Thread):
                 self.tracked_events[data["id"]].append(data)
 
-            self.frame_manager.close(frame_name)
+            self.frame_manager.close(frame_id)
         except FileNotFoundError:
             pass
@@ -218,47 +215,16 @@ class EmbeddingMaintainer(threading.Thread):
             _, buffer = cv2.imencode(".jpg", cropped_image)
             snapshot_image = buffer.tobytes()
 
-            num_thumbnails = len(self.tracked_events.get(event_id, []))
-
             embed_image = (
                 [snapshot_image]
                 if event.has_snapshot and camera_config.genai.use_snapshot
                 else (
-                    [
-                        data["thumbnail"]
-                        for data in self.tracked_events[event_id]
-                    ]
-                    if num_thumbnails > 0
+                    [thumbnail for data in self.tracked_events[event_id]]
+                    if len(self.tracked_events.get(event_id, [])) > 0
                     else [thumbnail]
                 )
             )
 
-            if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
-                logger.debug(
-                    f"Saving {num_thumbnails} thumbnails for event {event.id}"
-                )
-
-                Path(
-                    os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
-                ).mkdir(parents=True, exist_ok=True)
-
-                for idx, data in enumerate(self.tracked_events[event_id], 1):
-                    jpg_bytes: bytes = data["thumbnail"]
-
-                    if jpg_bytes is None:
-                        logger.warning(
-                            f"Unable to save thumbnail {idx} for {event.id}."
-                        )
-                    else:
-                        with open(
-                            os.path.join(
-                                CLIPS_DIR,
-                                f"genai-requests/{event.id}/{idx}.jpg",
-                            ),
-                            "wb",
-                        ) as j:
-                            j.write(jpg_bytes)
-
             # Generate the description. Call happens in a thread since it is network bound.
             threading.Thread(
                 target=self._embed_description,
@@ -304,7 +270,7 @@ class EmbeddingMaintainer(threading.Thread):
     def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
         """Embed the thumbnail for an event."""
-        self.embeddings.embed_thumbnail(event_id, thumbnail)
+        self.embeddings.upsert_thumbnail(event_id, thumbnail)
 
     def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
         """Embed the description for an event."""
@@ -321,15 +287,11 @@ class EmbeddingMaintainer(threading.Thread):
         # fire and forget description update
         self.requestor.send_data(
             UPDATE_EVENT_DESCRIPTION,
-            {
-                "type": TrackedObjectUpdateTypesEnum.description,
-                "id": event.id,
-                "description": description,
-            },
+            {"id": event.id, "description": description},
         )
 
-        # Embed the description
-        self.embeddings.embed_description(event.id, description)
+        # Encode the description
+        self.embeddings.upsert_description(event.id, description)
 
         logger.debug(
             "Generated description for %s (%d images): %s",
@@ -357,25 +319,18 @@ class EmbeddingMaintainer(threading.Thread):
         )
 
         if event.has_snapshot and source == "snapshot":
-            snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
-
-            if not os.path.isfile(snapshot_file):
-                logger.error(
-                    f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}"
-                )
-                return
-
-            with open(snapshot_file, "rb") as image_file:
+            with open(
+                os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
+                "rb",
+            ) as image_file:
                 snapshot_image = image_file.read()
                 img = cv2.imdecode(
                     np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
                 )
 
                 # crop snapshot based on region before sending off to genai
-                # provide full image if region doesn't exist (manual events)
-                region = event.data.get("region", [0, 0, 1, 1])
                 height, width = img.shape[:2]
-                x1_rel, y1_rel, width_rel, height_rel = region
+                x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
                 x1, y1 = int(x1_rel * width), int(y1_rel * height)
                 cropped_image = img[
@@ -389,7 +344,7 @@ class EmbeddingMaintainer(threading.Thread):
             [snapshot_image]
             if event.has_snapshot and source == "snapshot"
             else (
-                [data["thumbnail"] for data in self.tracked_events[event_id]]
+                [thumbnail for data in self.tracked_events[event_id]]
                 if len(self.tracked_events.get(event_id, [])) > 0
                 else [thumbnail]
             )

View File

@@ -64,8 +64,6 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
 class AudioProcessor(util.Process):
-    name = "frigate.audio_manager"
-
     def __init__(
         self,
         cameras: list[CameraConfig],
@@ -216,10 +214,6 @@ class AudioEventMaintainer(threading.Thread):
"label": label, "label": label,
"last_detection": datetime.datetime.now().timestamp(), "last_detection": datetime.datetime.now().timestamp(),
} }
else:
self.logger.warning(
f"Failed to create audio event with status code {resp.status_code}"
)
def expire_detections(self) -> None: def expire_detections(self) -> None:
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()

View File

@@ -4,6 +4,7 @@ import datetime
 import logging
 import os
 import threading
+from enum import Enum
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
@@ -15,7 +16,9 @@ from frigate.models import Event, Timeline
 logger = logging.getLogger(__name__)
 
-CHUNK_SIZE = 50
+class EventCleanupType(str, Enum):
+    clips = "clips"
+    snapshots = "snapshots"
 
 
 class EventCleanup(threading.Thread):
@@ -61,11 +64,19 @@ class EventCleanup(threading.Thread):
         return self.camera_labels[camera]["labels"]
 
-    def expire_snapshots(self) -> list[str]:
+    def expire(self, media_type: EventCleanupType) -> list[str]:
         ## Expire events from unlisted cameras based on the global config
-        retain_config = self.config.snapshots.retain
-        file_extension = "jpg"
-        update_params = {"has_snapshot": False}
+        if media_type == EventCleanupType.clips:
+            expire_days = max(
+                self.config.record.alerts.retain.days,
+                self.config.record.detections.retain.days,
+            )
+            file_extension = None  # mp4 clips are no longer stored in /clips
+            update_params = {"has_clip": False}
+        else:
+            retain_config = self.config.snapshots.retain
+            file_extension = "jpg"
+            update_params = {"has_snapshot": False}
 
         distinct_labels = self.get_removed_camera_labels()
@@ -73,7 +84,10 @@ class EventCleanup(threading.Thread):
         # loop over object types in db
         for event in distinct_labels:
             # get expiration time for this label
-            expire_days = retain_config.objects.get(event.label, retain_config.default)
+            if media_type == EventCleanupType.snapshots:
+                expire_days = retain_config.objects.get(
+                    event.label, retain_config.default
+                )
 
             expire_after = (
                 datetime.datetime.now() - datetime.timedelta(days=expire_days)
@@ -93,7 +107,6 @@ class EventCleanup(threading.Thread):
                 .namedtuples()
                 .iterator()
             )
-            logger.debug(f"{len(list(expired_events))} events can be expired")
             # delete the media from disk
             for expired in expired_events:
                 media_name = f"{expired.camera}-{expired.id}"
@@ -112,40 +125,25 @@ class EventCleanup(threading.Thread):
logger.warning(f"Unable to delete event images: {e}") logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry # update the clips attribute for the db entry
query = Event.select(Event.id).where( update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys), Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after, Event.start_time < expire_after,
Event.label == event.label, Event.label == event.label,
Event.retain_indefinitely == False, Event.retain_indefinitely == False,
) )
update_query.execute()
events_to_update = []
for event in query.iterator():
events_to_update.append(event.id)
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(
f"Updating {update_params} for {len(events_to_update)} events"
)
Event.update(update_params).where(
Event.id << events_to_update
).execute()
events_to_update = []
# Update any remaining events
if events_to_update:
logger.debug(
f"Updating clips/snapshots attribute for {len(events_to_update)} events"
)
Event.update(update_params).where(
Event.id << events_to_update
).execute()
events_to_update = [] events_to_update = []
## Expire events from cameras based on the camera config ## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items(): for name, camera in self.config.cameras.items():
retain_config = camera.snapshots.retain if media_type == EventCleanupType.clips:
expire_days = max(
camera.record.alerts.retain.days,
camera.record.detections.retain.days,
)
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera # get distinct objects in database for this camera
distinct_labels = self.get_camera_labels(name) distinct_labels = self.get_camera_labels(name)
@@ -153,9 +151,10 @@ class EventCleanup(threading.Thread):
             # loop over object types in db
             for event in distinct_labels:
                 # get expiration time for this label
-                expire_days = retain_config.objects.get(
-                    event.label, retain_config.default
-                )
+                if media_type == EventCleanupType.snapshots:
+                    expire_days = retain_config.objects.get(
+                        event.label, retain_config.default
+                    )
 
                 expire_after = (
                     datetime.datetime.now() - datetime.timedelta(days=expire_days)
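
Note: on both sides the per-label retention falls back to the configured default when no object-specific value exists; `dict.get` with a default expresses that lookup directly. For instance (values illustrative):

    # Per-object retention days, with a catch-all default.
    objects = {"person": 30, "car": 7}
    default = 10

    assert objects.get("person", default) == 30  # explicit override
    assert objects.get("dog", default) == 10     # falls back to the default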
@@ -182,157 +181,28 @@ class EventCleanup(threading.Thread):
             for event in expired_events:
                 events_to_update.append(event.id)
 
-                try:
-                    media_name = f"{event.camera}-{event.id}"
-                    media_path = Path(
-                        f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
-                    )
-                    media_path.unlink(missing_ok=True)
-                    media_path = Path(
-                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
-                    )
-                    media_path.unlink(missing_ok=True)
-                except OSError as e:
-                    logger.warning(f"Unable to delete event images: {e}")
+                if media_type == EventCleanupType.snapshots:
+                    try:
+                        media_name = f"{event.camera}-{event.id}"
+                        media_path = Path(
+                            f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
+                        )
+                        media_path.unlink(missing_ok=True)
+                        media_path = Path(
+                            f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+                        )
+                        media_path.unlink(missing_ok=True)
+                    except OSError as e:
+                        logger.warning(f"Unable to delete event images: {e}")
 
         # update the clips attribute for the db entry
-        for i in range(0, len(events_to_update), CHUNK_SIZE):
-            batch = events_to_update[i : i + CHUNK_SIZE]
-            logger.debug(f"Updating {update_params} for {len(batch)} events")
-            Event.update(update_params).where(Event.id << batch).execute()
-
-        return events_to_update
-
-    def expire_clips(self) -> list[str]:
-        ## Expire events from unlisted cameras based on the global config
-        expire_days = max(
-            self.config.record.alerts.retain.days,
-            self.config.record.detections.retain.days,
-        )
-        file_extension = None  # mp4 clips are no longer stored in /clips
-        update_params = {"has_clip": False}
-
-        # get expiration time for this label
-        expire_after = (
-            datetime.datetime.now() - datetime.timedelta(days=expire_days)
-        ).timestamp()
-
-        # grab all events after specific time
-        expired_events: list[Event] = (
-            Event.select(
-                Event.id,
-                Event.camera,
-            )
-            .where(
-                Event.camera.not_in(self.camera_keys),
-                Event.start_time < expire_after,
-                Event.retain_indefinitely == False,
-            )
-            .namedtuples()
-            .iterator()
-        )
-        logger.debug(f"{len(list(expired_events))} events can be expired")
-
-        # delete the media from disk
-        for expired in expired_events:
-            media_name = f"{expired.camera}-{expired.id}"
-            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
-
-            try:
-                media_path.unlink(missing_ok=True)
-
-                if file_extension == "jpg":
-                    media_path = Path(
-                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
-                    )
-                    media_path.unlink(missing_ok=True)
-            except OSError as e:
-                logger.warning(f"Unable to delete event images: {e}")
-
-        # update the clips attribute for the db entry
-        query = Event.select(Event.id).where(
-            Event.camera.not_in(self.camera_keys),
-            Event.start_time < expire_after,
-            Event.retain_indefinitely == False,
-        )
-
-        events_to_update = []
-
-        for event in query.iterator():
-            events_to_update.append(event.id)
-
-            if len(events_to_update) >= CHUNK_SIZE:
-                logger.debug(
-                    f"Updating {update_params} for {len(events_to_update)} events"
-                )
-                Event.update(update_params).where(
-                    Event.id << events_to_update
-                ).execute()
-                events_to_update = []
-
-        # Update any remaining events
-        if events_to_update:
-            logger.debug(
-                f"Updating clips/snapshots attribute for {len(events_to_update)} events"
-            )
-            Event.update(update_params).where(Event.id << events_to_update).execute()
-            events_to_update = []
-
-        now = datetime.datetime.now()
-
-        ## Expire events from cameras based on the camera config
-        for name, camera in self.config.cameras.items():
-            expire_days = max(
-                camera.record.alerts.retain.days,
-                camera.record.detections.retain.days,
-            )
-            alert_expire_date = (
-                now - datetime.timedelta(days=camera.record.alerts.retain.days)
-            ).timestamp()
-            detection_expire_date = (
-                now - datetime.timedelta(days=camera.record.detections.retain.days)
-            ).timestamp()
-
-            # grab all events after specific time
-            expired_events = (
-                Event.select(
-                    Event.id,
-                    Event.camera,
-                )
-                .where(
-                    Event.camera == name,
-                    Event.retain_indefinitely == False,
-                    (
-                        (
-                            (Event.data["max_severity"] != "detection")
-                            | (Event.data["max_severity"].is_null())
-                        )
-                        & (Event.end_time < alert_expire_date)
-                    )
-                    | (
-                        (Event.data["max_severity"] == "detection")
-                        & (Event.end_time < detection_expire_date)
-                    ),
-                )
-                .namedtuples()
-                .iterator()
-            )
-
-            # delete the grabbed clips from disk
-            # only snapshots are stored in /clips
-            # so no need to delete mp4 files
-            for event in expired_events:
-                events_to_update.append(event.id)
-
-            # update the clips attribute for the db entry
-            for i in range(0, len(events_to_update), CHUNK_SIZE):
-                batch = events_to_update[i : i + CHUNK_SIZE]
-                logger.debug(f"Updating {update_params} for {len(batch)} events")
-                Event.update(update_params).where(Event.id << batch).execute()
+        Event.update(update_params).where(Event.id << events_to_update).execute()
 
         return events_to_update
 
     def run(self) -> None:
         # only expire events every 5 minutes
         while not self.stop_event.wait(300):
-            events_with_expired_clips = self.expire_clips()
+            events_with_expired_clips = self.expire(EventCleanupType.clips)
 
             # delete timeline entries for events that have expired recordings
             # delete up to 100,000 at a time
@@ -343,7 +213,7 @@ class EventCleanup(threading.Thread):
                     Timeline.source_id << deleted_events_list[i : i + max_deletes]
                 ).execute()
 
-            self.expire_snapshots()
+            self.expire(EventCleanupType.snapshots)
 
             # drop events from db where has_clip and has_snapshot are false
             events = (
@@ -352,11 +222,10 @@ class EventCleanup(threading.Thread):
                 .iterator()
             )
             events_to_delete = [e.id for e in events]
-            logger.debug(f"Found {len(events_to_delete)} events that can be expired")
 
             if len(events_to_delete) > 0:
-                for i in range(0, len(events_to_delete), CHUNK_SIZE):
-                    chunk = events_to_delete[i : i + CHUNK_SIZE]
-                    logger.debug(f"Deleting {len(chunk)} events from the database")
+                chunk_size = 50
+                for i in range(0, len(events_to_delete), chunk_size):
+                    chunk = events_to_delete[i : i + chunk_size]
                     Event.delete().where(Event.id << chunk).execute()
 
             if self.config.semantic_search.enabled:
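
Note: both versions delete expired rows in fixed-size chunks rather than with one giant `IN` clause, which keeps each statement under SQLite's bound-variable limit; the `-` side just hoists the constant to module level as `CHUNK_SIZE`. The slicing pattern on its own:

    CHUNK_SIZE = 50

    def chunked(ids: list[str], size: int = CHUNK_SIZE):
        # Yield consecutive slices so each DELETE carries at most `size` ids.
        for i in range(0, len(ids), size):
            yield ids[i : i + size]

    # Usage sketch (peewee-style, as in the code above):
    # for chunk in chunked(events_to_delete):
    #     Event.delete().where(Event.id << chunk).execute()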

View File

@@ -10,7 +10,6 @@ from enum import Enum
 from typing import Optional
 
 import cv2
-from numpy import ndarray
 
 from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
 from frigate.comms.events_updater import EventUpdatePublisher
@@ -46,7 +45,7 @@ class ExternalEventProcessor:
         duration: Optional[int],
         include_recording: bool,
         draw: dict[str, any],
-        snapshot_frame: Optional[ndarray],
+        snapshot_frame: any,
     ) -> str:
         now = datetime.datetime.now().timestamp()
         camera_config = self.config.cameras.get(camera)
@@ -65,14 +64,13 @@ class ExternalEventProcessor:
                 EventTypeEnum.api,
                 EventStateEnum.start,
                 camera,
-                "",
                 {
                     "id": event_id,
                     "label": label,
                     "sub_label": sub_label,
                     "score": score,
                     "camera": camera,
-                    "start_time": now - camera_config.record.event_pre_capture,
+                    "start_time": now,
                     "end_time": end,
                     "thumbnail": thumbnail,
                     "has_clip": camera_config.record.enabled and include_recording,
@@ -108,7 +106,6 @@ class ExternalEventProcessor:
                 EventTypeEnum.api,
                 EventStateEnum.end,
                 None,
-                "",
                 {"id": event_id, "end_time": end_time},
             )
         )
@@ -133,11 +130,8 @@ class ExternalEventProcessor:
         label: str,
         event_id: str,
         draw: dict[str, any],
-        img_frame: Optional[ndarray],
-    ) -> Optional[str]:
-        if img_frame is None:
-            return None
-
+        img_frame: any,
+    ) -> str:
         # write clean snapshot if enabled
         if camera_config.snapshots.clean_copy:
             ret, png = cv2.imencode(".png", img_frame)

View File

@@ -75,30 +75,25 @@ class EventProcessor(threading.Thread):
             if update == None:
                 continue
 
-            source_type, event_type, camera, _, event_data = update
+            source_type, event_type, camera, event_data = update
 
             logger.debug(
                 f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
             )
 
             if source_type == EventTypeEnum.tracked_object:
-                id = event_data["id"]
-
                 self.timeline_queue.put(
                     (
                         camera,
                         source_type,
                         event_type,
-                        self.events_in_process.get(id),
+                        self.events_in_process.get(event_data["id"]),
                         event_data,
                     )
                 )
 
-                # if this is the first message, just store it and continue, its not time to insert it in the db
-                if (
-                    event_type == EventStateEnum.start
-                    or id not in self.events_in_process
-                ):
-                    self.events_in_process[id] = event_data
+                if event_type == EventStateEnum.start:
+                    self.events_in_process[event_data["id"]] = event_data
                     continue
 
                 self.handle_object_detection(event_type, camera, event_data)
@@ -128,6 +123,10 @@ class EventProcessor(threading.Thread):
"""handle tracked object event updates.""" """handle tracked object event updates."""
updated_db = False updated_db = False
# if this is the first message, just store it and continue, its not time to insert it in the db
if event_type == EventStateEnum.start:
self.events_in_process[event_data["id"]] = event_data
if should_update_db(self.events_in_process[event_data["id"]], event_data): if should_update_db(self.events_in_process[event_data["id"]], event_data):
updated_db = True updated_db = True
camera_config = self.config.cameras[camera] camera_config = self.config.cameras[camera]
@@ -211,7 +210,6 @@ class EventProcessor(threading.Thread):
"top_score": event_data["top_score"], "top_score": event_data["top_score"],
"attributes": attributes, "attributes": attributes,
"type": "object", "type": "object",
"max_severity": event_data.get("max_severity"),
}, },
} }

View File

@@ -71,8 +71,8 @@ PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m", "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m", "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi", FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17 "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17 "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda", FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}", "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}", "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",

View File

@@ -1,17 +1,14 @@
"""Generative AI module for Frigate.""" """Generative AI module for Frigate."""
import importlib import importlib
import logging
import os import os
from typing import Optional from typing import Optional
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
from frigate.models import Event from frigate.models import Event
logger = logging.getLogger(__name__)
PROVIDERS = {} PROVIDERS = {}
@@ -44,7 +41,6 @@ class GenAIClient:
             event.label,
             camera_config.genai.prompt,
         ).format(**model_to_dict(event))
-        logger.debug(f"Sending images to genai provider with prompt: {prompt}")
         return self._send(prompt, thumbnails)
 
     def _init_provider(self):
@@ -56,19 +52,13 @@ class GenAIClient:
     return None
 
 
-def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
+def get_genai_client(genai_config: GenAIConfig) -> Optional[GenAIClient]:
     """Get the GenAI client."""
-    genai_config = config.genai
-    genai_cameras = [
-        c for c in config.cameras.values() if c.enabled and c.genai.enabled
-    ]
-
-    if genai_cameras:
+    if genai_config.enabled:
         load_providers()
         provider = PROVIDERS.get(genai_config.provider)
         if provider:
             return provider(genai_config)
 
     return None

Some files were not shown because too many files have changed in this diff.