forked from Github/frigate

Compare commits — 49 commits (dependabot … dependabot)

| SHA1 |
|---|
| 27a43e420a |
| e7ad38d827 |
| a1ce9aacf2 |
| 322b847356 |
| 98338e4c7f |
| 171a89f37b |
| 8114b541a8 |
| c48396c5c6 |
| 00371546a3 |
| 87e7b62c85 |
| 15ffe5c254 |
| a767dad3a1 |
| 9387246f83 |
| bed20de302 |
| 70fc5393b1 |
| 9b80dbe014 |
| 78a013d63a |
| ddfe8f3921 |
| 4af752028f |
| b149828c9f |
| 3dc26e78ef |
| d9ef8fa206 |
| 292499aebc |
| 717493e668 |
| d49f958d4d |
| 33ee32865f |
| 17f8939f97 |
| 1b7fe9523d |
| 0763f56047 |
| 1ea282fba8 |
| 869fa2631e |
| f336a91fee |
| d302b6e198 |
| ed2e1f3f72 |
| b4d82084a9 |
| 53b96dfb89 |
| 0e3fb6cbdd |
| 6b12a45a95 |
| 0b9c4c18dd |
| d0cc8cb64b |
| bb86e71e65 |
| 8aa6297308 |
| d3b631a952 |
| 47d495fc01 |
| 32322b23b2 |
| c0ba98e26f |
| a5a7cd3107 |
| a729408599 |
| 4dddc53735 |
25 .github/workflows/ci.yml (vendored)

@@ -7,7 +7,7 @@ on:
      - dev
      - master
  paths-ignore:
      - 'docs/**'
      - "docs/**"

# only run the latest commit to avoid cache overwrites
concurrency:

@@ -24,6 +24,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -45,6 +47,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -71,21 +75,14 @@ jobs:
            rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
      - name: Build and push Rockchip build
        uses: docker/bake-action@v3
        with:
          push: true
          targets: rk
          files: docker/rockchip/rk.hcl
          set: |
            rk.tags=${{ steps.setup.outputs.image-name }}-rk
            *.cache-from=type=gha
  jetson_jp4_build:
    runs-on: ubuntu-latest
    name: Jetson Jetpack 4
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -112,6 +109,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -140,6 +139,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -165,6 +166,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup

@@ -188,6 +191,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
24 .github/workflows/dependabot-auto-merge.yaml (vendored)

@@ -1,24 +0,0 @@
name: dependabot-auto-merge
on: pull_request

permissions:
  contents: write

jobs:
  dependabot-auto-merge:
    runs-on: ubuntu-latest
    if: github.actor == 'dependabot[bot]'
    steps:
      - name: Get Dependabot metadata
        id: metadata
        uses: dependabot/fetch-metadata@v2
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Enable auto-merge for Dependabot PRs
        if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
        run: |
          gh pr review --approve "$PR_URL"
          gh pr merge --auto --squash "$PR_URL"
        env:
          PR_URL: ${{ github.event.pull_request.html_url }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
14 .github/workflows/pull_request.yml (vendored)

@@ -3,7 +3,7 @@ name: On pull request
on:
  pull_request:
    paths-ignore:
      - 'docs/**'
      - "docs/**"

env:
  DEFAULT_PYTHON: 3.9

@@ -19,6 +19,8 @@ jobs:
      DOCKER_BUILDKIT: "1"
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x

@@ -38,6 +40,8 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x

@@ -52,6 +56,8 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 20.x

@@ -67,8 +73,10 @@ jobs:
    steps:
      - name: Check out the repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
        uses: actions/setup-python@v5.1.0
        uses: actions/setup-python@v5.3.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Install requirements

@@ -88,6 +96,8 @@ jobs:
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: actions/setup-node@master
        with:
          node-version: 16.x
9 .github/workflows/release.yml (vendored)

@@ -11,6 +11,8 @@ jobs:

    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - id: lowercaseRepo
        uses: ASzc/change-string-case-action@v6
        with:

@@ -22,10 +24,13 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create tag variables
        env:
          TAG: ${{ github.ref_name }}
          LOWERCASE_REPO: ${{ steps.lowercaseRepo.outputs.lowercase }}
        run: |
          BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
          BUILD_TYPE=$([[ "${TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
          echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV
          echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
          echo "BASE=ghcr.io/${LOWERCASE_REPO}" >> $GITHUB_ENV
          echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV
          echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
      - name: Tag and push the main image
5 .github/workflows/stale.yml (vendored)

@@ -23,7 +23,9 @@ jobs:
          exempt-pr-labels: "pinned,security,dependencies"
          operations-per-run: 120
      - name: Print outputs
        run: echo ${{ join(steps.stale.outputs.*, ',') }}
        env:
          STALE_OUTPUT: ${{ join(steps.stale.outputs.*, ',') }}
        run: echo "$STALE_OUTPUT"

  # clean_ghcr:
  #   name: Delete outdated dev container images

@@ -38,4 +40,3 @@ jobs:
  #       account-type: personal
  #       token: ${{ secrets.GITHUB_TOKEN }}
  #       token-type: github-token
@@ -1,12 +1,12 @@
appdirs==1.4.4
argcomplete==2.0.0
contextlib2==0.6.0.post1
distlib==0.3.6
filelock==3.8.0
future==0.18.2
importlib-metadata==5.1.0
importlib-resources==5.1.2
netaddr==0.8.0
netifaces==0.10.9
verboselogs==1.7
virtualenv==20.17.0
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
@@ -41,7 +41,7 @@ transformers == 4.45.*

# Generative AI
google-generativeai == 0.8.*
ollama == 0.3.*
openai == 1.51.*
openai == 1.59.*

# push notifications
py-vapid == 1.9.*
pywebpush == 2.0.*
@@ -12,26 +12,11 @@ ARG TARGETARCH

COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt

# Build CuDNN
FROM wget AS cudnn-deps

ARG COMPUTE_LEVEL

RUN apt-get update \
    && apt-get install -y git build-essential

RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
    && dpkg -i cuda-keyring_1.1-1_all.deb \
    && apt-get update \
    && apt-get -y install cuda-toolkit \
    && rm -rf /var/lib/apt/lists/*

FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl && \
    ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda

ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/

@@ -42,7 +27,7 @@ FROM devcontainer AS devcontainer-trt

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \

@@ -24,6 +24,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS=""
@@ -174,7 +174,7 @@ NOTE: The folder that is set for the config needs to be the folder that contains

### Custom go2rtc version

Frigate currently includes go2rtc v1.9.4; there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.2; there may be certain cases where you want to run a different version of go2rtc.

To do this:
@@ -41,6 +41,7 @@ cameras:
    ...
    onvif:
      # Required: host of the camera being connected to.
      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000

@@ -49,6 +50,8 @@ cameras:
      user: admin
      # Optional: password for login.
      password: admin
      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
      tls_insecure: False
      # Optional: PTZ camera object autotracking. Keeps a moving object in
      # the center of the frame by automatically moving the PTZ camera.
      autotracking:
@@ -5,6 +5,8 @@ title: Generative AI

Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.

Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.

:::info

Semantic Search must be enabled to use Generative AI.
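For orientation, the provider wiring described above lives under a top-level `genai` key; a minimal sketch, assuming an Ollama backend (the `base_url` and `model` values are illustrative placeholders, not values taken from this change):

```yaml
genai:
  enabled: True
  provider: ollama
  base_url: http://localhost:11434
  model: llava
```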
@@ -231,28 +231,11 @@ docker run -d \

### Setup Decoder

The decoder you need to pass in the `hwaccel_args` will depend on the input video.

A list of supported codecs (you can use `ffmpeg -decoders | grep cuvid` in the container to get the ones your card supports)

```
V..... h263_cuvid Nvidia CUVID H263 decoder (codec h263)
V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264)
V..... hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc)
V..... mjpeg_cuvid Nvidia CUVID MJPEG decoder (codec mjpeg)
V..... mpeg1_cuvid Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)
V..... mpeg2_cuvid Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)
V..... mpeg4_cuvid Nvidia CUVID MPEG4 decoder (codec mpeg4)
V..... vc1_cuvid Nvidia CUVID VC1 decoder (codec vc1)
V..... vp8_cuvid Nvidia CUVID VP8 decoder (codec vp8)
V..... vp9_cuvid Nvidia CUVID VP9 decoder (codec vp9)
```

For example, for H264 video, you'll select `preset-nvidia-h264`.
Using `preset-nvidia` ffmpeg will automatically select the necessary profile for the incoming video, and will log an error if the profile is not supported by your GPU.

```yaml
ffmpeg:
  hwaccel_args: preset-nvidia-h264
  hwaccel_args: preset-nvidia
```

If everything is working correctly, you should see a significant improvement in performance.
@@ -23,7 +23,7 @@ If you are using go2rtc, you should adjust the following settings in your camera

- Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_, as these non-standard codecs remove keyframes (see below).
- Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.

The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.
@@ -144,7 +144,7 @@ detectors:

#### SSDLite MobileNet v2

An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.

Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:

```yaml
detectors:

@@ -254,6 +256,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320

@@ -282,6 +285,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type

The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.

Use the config below to work with generated TRT models:

```yaml
detectors:
  tensorrt:
@@ -117,25 +117,27 @@ auth:
  hash_iterations: 600000

# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
  # Optional: path to the model (default: automatic based on detector)
  # Required: path to the model (default: automatic based on detector)
  path: /edgetpu_model.tflite
  # Optional: path to the labelmap (default: shown below)
  # Required: path to the labelmap (default: shown below)
  labelmap_path: /labelmap.txt
  # Required: Object detection model input width (default: shown below)
  width: 320
  # Required: Object detection model input height (default: shown below)
  height: 320
  # Optional: Object detection model input colorspace
  # Required: Object detection model input colorspace
  # Valid values are rgb, bgr, or yuv. (default: shown below)
  input_pixel_format: rgb
  # Optional: Object detection model input tensor format
  # Required: Object detection model input tensor format
  # Valid values are nhwc or nchw (default: shown below)
  input_tensor: nhwc
  # Optional: Object detection model type, currently only used with the OpenVINO detector
  # Required: Object detection model type, currently only used with the OpenVINO detector
  # Valid values are ssd, yolox, yolonas (default: shown below)
  model_type: ssd
  # Optional: Label name modifications. These are merged into the standard labelmap.
  # Required: Label name modifications. These are merged into the standard labelmap.
  labelmap:
    2: vehicle
  # Optional: Map of object labels to their attribute labels (default: depends on model)

@@ -686,6 +688,7 @@ cameras:
    # to enable PTZ controls.
    onvif:
      # Required: host of the camera being connected to.
      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000

@@ -694,6 +697,8 @@ cameras:
      user: admin
      # Optional: password for login.
      password: admin
      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
      tls_insecure: False
      # Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
      # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
      ignore_time_mismatch: False

@@ -757,6 +762,8 @@ cameras:
      - cat
    # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
    required_zones: []
    # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
    debug_save_thumbnails: False

# Optional
ui:
@@ -132,6 +132,28 @@ cameras:
      - detect
```

## Handling Complex Passwords

go2rtc expects URL-encoded passwords in the config; [urlencoder.org](https://urlencoder.org) can be used for this purpose.

For example:

```yaml
go2rtc:
  streams:
    my_camera: rtsp://username:$@foo%@192.168.1.100
```

becomes

```yaml
go2rtc:
  streams:
    my_camera: rtsp://username:$%40foo%25@192.168.1.100
```

See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
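As an alternative to the web tool, the same encoding can be produced locally; a minimal Python sketch using only the standard library (the password is the example string from above):

```python
from urllib.parse import quote

password = "$@foo%"
# "$" is valid in the userinfo part of a URL and can stay literal;
# "@" and "%" must be percent-encoded.
print(quote(password, safe="$"))  # -> $%40foo%25
```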
## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
@@ -5,7 +5,7 @@ title: Using Semantic Search

Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate has support for [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create embeddings, which runs locally. Embeddings are then saved to Frigate's database.
Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.

Semantic Search is accessed via the _Explore_ view in the Frigate UI.

@@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.

## Configuration

Semantic Search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.

```yaml
semantic_search:

@@ -29,9 +29,9 @@ semantic_search:

:::tip

The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to set the config back to `False` before restarting Frigate again.
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.

If you are enabling the Search feature for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.
If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.

:::

@@ -39,9 +39,9 @@ If you are enabling the Search feature for the first time, be advised that Friga

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted CLIP models are available and can be selected by setting the `model_size` config option as `small` or `large`:
Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:

```yaml
semantic_search:

@@ -50,7 +50,7 @@ semantic_search:
```

- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.

### GPU Acceleration

@@ -84,7 +84,7 @@ If the correct build is used for your GPU and the `large` model is configured, t

## Usage and Best Practices

1. Semantic Search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and Semantic Search for the best results.
1. Semantic Search is used in conjunction with the other filters available on the Explore page. Use a combination of traditional filtering and Semantic Search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
@@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:

- Encode Mode: H.264
- Resolution: 2688\*1520
- Frame Rate(FPS): 15
- I Frame Interval: 30
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)

**Sub Stream (Detection)**
@@ -98,3 +98,11 @@ docker run -d \
  -p 8555:8555/udp \
  ghcr.io/blakeblackshear/frigate:stable
```

### My RTSP stream works fine in VLC, but it does not work when I put the same URL in my Frigate config. Is this a bug?

No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC automatically switches between UDP and TCP depending on network conditions and stream availability. So a stream that works in VLC but not in Frigate is likely due to VLC selecting UDP as the transfer protocol.

TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.

You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
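For illustration, a camera entry using that preset might look like the sketch below (the camera name, URL, and credentials are placeholders):

```yaml
cameras:
  my_camera:
    ffmpeg:
      inputs:
        - path: rtsp://user:pass@192.168.1.10:554/stream
          input_args: preset-rtsp-udp
          roles:
            - detect
```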
7069 docs/package-lock.json (generated)
File diff suppressed because it is too large
@@ -17,15 +17,15 @@
    "write-heading-ids": "docusaurus write-heading-ids"
  },
  "dependencies": {
    "@docusaurus/core": "^3.5.2",
    "@docusaurus/preset-classic": "^3.5.2",
    "@docusaurus/theme-mermaid": "^3.5.2",
    "@docusaurus/plugin-content-docs": "^3.5.2",
    "@mdx-js/react": "^3.0.1",
    "@docusaurus/core": "^3.6.3",
    "@docusaurus/preset-classic": "^3.6.3",
    "@docusaurus/theme-mermaid": "^3.6.3",
    "@docusaurus/plugin-content-docs": "^3.6.3",
    "@mdx-js/react": "^3.1.0",
    "clsx": "^2.1.1",
    "docusaurus-plugin-openapi-docs": "^4.1.0",
    "docusaurus-theme-openapi-docs": "^4.1.0",
    "prism-react-renderer": "^2.4.0",
    "docusaurus-plugin-openapi-docs": "^4.3.1",
    "docusaurus-theme-openapi-docs": "^4.3.1",
    "prism-react-renderer": "^2.4.1",
    "raw-loader": "^4.0.2",
    "react": "^18.3.1",
    "react-dom": "^18.3.1"
1199 docs/static/frigate-api.yaml (vendored)
File diff suppressed because it is too large
@@ -17,17 +17,17 @@ from fastapi.responses import JSONResponse, PlainTextResponse
from markupsafe import escape
from peewee import operator

from frigate.api.defs.app_body import AppConfigSetBody
from frigate.api.defs.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.const import CONFIG_DIR
from frigate.models import Event, Timeline
from frigate.util.builtin import (
    clean_camera_user_pass,
    get_tz_modifiers,
    update_yaml_from_url,
)
from frigate.util.config import find_config_file
from frigate.util.services import (
    ffprobe_stream,
    get_nvidia_driver_info,

@@ -134,9 +134,27 @@ def config(request: Request):
        for zone_name, zone in config_obj.cameras[camera_name].zones.items():
            camera_dict["zones"][zone_name]["color"] = zone.color

    # remove go2rtc stream passwords
    go2rtc: dict[str, any] = config_obj.go2rtc.model_dump(
        mode="json", warnings="none", exclude_none=True
    )
    for stream_name, stream in go2rtc.get("streams", {}).items():
        if stream is None:
            continue
        if isinstance(stream, str):
            cleaned = clean_camera_user_pass(stream)
        else:
            cleaned = []

            for item in stream:
                cleaned.append(clean_camera_user_pass(item))

        config["go2rtc"]["streams"][stream_name] = cleaned

    config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
    config["model"]["colormap"] = config_obj.model.colormap

    # use merged labelamp
    for detector_config in config["detectors"].values():
        detector_config["model"]["labelmap"] = (
            request.app.frigate_config.model.merged_labelmap

@@ -147,13 +165,7 @@ def config(request: Request):

@router.get("/config/raw")
def config_raw():
    config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")

    # Check if we can use .yaml instead of .yml
    config_file_yaml = config_file.replace(".yml", ".yaml")

    if os.path.isfile(config_file_yaml):
        config_file = config_file_yaml
    config_file = find_config_file()

    if not os.path.isfile(config_file):
        return JSONResponse(

@@ -198,13 +210,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):

    # Save the config to file
    try:
        config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")

        # Check if we can use .yaml instead of .yml
        config_file_yaml = config_file.replace(".yml", ".yaml")

        if os.path.isfile(config_file_yaml):
            config_file = config_file_yaml
        config_file = find_config_file()

        with open(config_file, "w") as f:
            f.write(new_config)

@@ -253,13 +259,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):

@router.put("/config/set")
def config_set(request: Request, body: AppConfigSetBody):
    config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")

    # Check if we can use .yaml instead of .yml
    config_file_yaml = config_file.replace(".yml", ".yaml")

    if os.path.isfile(config_file_yaml):
        config_file = config_file_yaml
    config_file = find_config_file()

    with open(config_file, "r") as f:
        old_raw_config = f.read()
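The repeated lookup logic removed above hints at what the new `find_config_file()` helper does; a hypothetical sketch consistent with the deleted code, not the actual `frigate.util.config` implementation:

```python
import os

def find_config_file() -> str:
    # Prefer CONFIG_FILE from the environment, falling back to the default
    # path, and use the .yaml spelling when that file exists on disk.
    default = os.environ.get("CONFIG_FILE", "/config/config.yml")
    yaml_alternative = default.replace(".yml", ".yaml")
    return yaml_alternative if os.path.isfile(yaml_alternative) else default
```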
@@ -18,7 +18,7 @@ from joserfc import jwt
from peewee import DoesNotExist
from slowapi import Limiter

from frigate.api.defs.app_body import (
from frigate.api.defs.request.app_body import (
    AppPostLoginBody,
    AppPostUsersBody,
    AppPutPasswordBody,

@@ -85,7 +85,12 @@ def get_remote_addr(request: Request):
        return str(ip)

    # if there wasn't anything in the route, just return the default
    return request.remote_addr or "127.0.0.1"
    remote_addr = None

    if hasattr(request, "remote_addr"):
        remote_addr = request.remote_addr

    return remote_addr or "127.0.0.1"


def get_jwt_secret() -> str:

@@ -324,7 +329,7 @@ def login(request: Request, body: AppPostLoginBody):
    try:
        db_user: User = User.get_by_id(user)
    except DoesNotExist:
        return JSONResponse(content={"message": "Login failed"}, status_code=400)
        return JSONResponse(content={"message": "Login failed"}, status_code=401)

    password_hash = db_user.password_hash
    if verify_password(password, password_hash):

@@ -335,7 +340,7 @@ def login(request: Request, body: AppPostLoginBody):
        response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
    )
    return response
    return JSONResponse(content={"message": "Login failed"}, status_code=400)
    return JSONResponse(content={"message": "Login failed"}, status_code=401)


@router.get("/users")
@@ -3,7 +3,7 @@ from typing import Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema

from frigate.review.maintainer import SeverityEnum
from frigate.review.types import SeverityEnum


class ReviewQueryParams(BaseModel):
42 frigate/api/defs/response/event_response.py (new file)

@@ -0,0 +1,42 @@
from typing import Any, Optional

from pydantic import BaseModel, ConfigDict


class EventResponse(BaseModel):
    id: str
    label: str
    sub_label: Optional[str]
    camera: str
    start_time: float
    end_time: Optional[float]
    false_positive: Optional[bool]
    zones: list[str]
    thumbnail: str
    has_clip: bool
    has_snapshot: bool
    retain_indefinitely: bool
    plus_id: Optional[str]
    model_hash: Optional[str]
    detector_type: Optional[str]
    model_type: Optional[str]
    data: dict[str, Any]

    model_config = ConfigDict(protected_namespaces=())


class EventCreateResponse(BaseModel):
    success: bool
    message: str
    event_id: str


class EventMultiDeleteResponse(BaseModel):
    success: bool
    deleted_events: list[str]
    not_found_events: list[str]


class EventUploadPlusResponse(BaseModel):
    success: bool
    plus_id: str
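A quick usage sketch for these Pydantic models (the field values are made-up examples). Note that `protected_namespaces=()` is needed on `EventResponse` because field names beginning with `model_`, such as `model_hash`, would otherwise collide with Pydantic v2's protected namespace:

```python
# Usage sketch; values are illustrative only.
resp = EventCreateResponse(success=True, message="Event created", event_id="1700000000.0-abc123")
print(resp.model_dump())
# {'success': True, 'message': 'Event created', 'event_id': '1700000000.0-abc123'}
```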
@@ -3,7 +3,7 @@ from typing import Dict

from pydantic import BaseModel, Json

from frigate.review.maintainer import SeverityEnum
from frigate.review.types import SeverityEnum


class ReviewSegmentResponse(BaseModel):
@@ -14,7 +14,16 @@ from fastapi.responses import JSONResponse
from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict

from frigate.api.defs.events_body import (
from frigate.api.defs.query.events_query_parameters import (
    DEFAULT_TIME_RANGE,
    EventsQueryParams,
    EventsSearchQueryParams,
    EventsSummaryQueryParams,
)
from frigate.api.defs.query.regenerate_query_parameters import (
    RegenerateQueryParameters,
)
from frigate.api.defs.request.events_body import (
    EventsCreateBody,
    EventsDeleteBody,
    EventsDescriptionBody,

@@ -22,19 +31,15 @@ from frigate.api.defs.events_body import (
    EventsSubLabelBody,
    SubmitPlusBody,
)
from frigate.api.defs.events_query_parameters import (
    DEFAULT_TIME_RANGE,
    EventsQueryParams,
    EventsSearchQueryParams,
    EventsSummaryQueryParams,
)
from frigate.api.defs.regenerate_query_parameters import (
    RegenerateQueryParameters,
from frigate.api.defs.response.event_response import (
    EventCreateResponse,
    EventMultiDeleteResponse,
    EventResponse,
    EventUploadPlusResponse,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import (
    CLIPS_DIR,
)
from frigate.const import CLIPS_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, ReviewSegment, Timeline

@@ -46,7 +51,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.events])


@router.get("/events")
@router.get("/events", response_model=list[EventResponse])
def events(params: EventsQueryParams = Depends()):
    camera = params.camera
    cameras = params.cameras

@@ -248,6 +253,8 @@ def events(params: EventsQueryParams = Depends()):
            order_by = Event.start_time.asc()
        elif sort == "date_desc":
            order_by = Event.start_time.desc()
        else:
            order_by = Event.start_time.desc()
    else:
        order_by = Event.start_time.desc()

@@ -263,7 +270,7 @@ def events(params: EventsQueryParams = Depends()):
    return JSONResponse(content=list(events))


@router.get("/events/explore")
@router.get("/events/explore", response_model=list[EventResponse])
def events_explore(limit: int = 10):
    # get distinct labels for all events
    distinct_labels = Event.select(Event.label).distinct().order_by(Event.label)

@@ -308,7 +315,8 @@ def events_explore(limit: int = 10):
            "data": {
                k: v
                for k, v in event.data.items()
                if k in ["type", "score", "top_score", "description"]
                if k
                in ["type", "score", "top_score", "description", "sub_label_score"]
            },
            "event_count": label_counts[event.label],
        }

@@ -324,7 +332,7 @@ def events_explore(limit: int = 10):
    return JSONResponse(content=processed_events)


@router.get("/event_ids")
@router.get("/event_ids", response_model=list[EventResponse])
def event_ids(ids: str):
    ids = ids.split(",")

@@ -582,19 +590,17 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())

        processed_events.append(processed_event)

    # Sort by search distance if search_results are available, otherwise by start_time as default
    if search_results:
    if (sort is None or sort == "relevance") and search_results:
        processed_events.sort(key=lambda x: x.get("search_distance", float("inf")))
    elif min_score is not None and max_score is not None and sort == "score_asc":
        processed_events.sort(key=lambda x: x["score"])
    elif min_score is not None and max_score is not None and sort == "score_desc":
        processed_events.sort(key=lambda x: x["score"], reverse=True)
    elif sort == "date_asc":
        processed_events.sort(key=lambda x: x["start_time"])
    else:
        if sort == "score_asc":
            processed_events.sort(key=lambda x: x["score"])
        elif sort == "score_desc":
            processed_events.sort(key=lambda x: x["score"], reverse=True)
        elif sort == "date_asc":
            processed_events.sort(key=lambda x: x["start_time"])
        else:
            # "date_desc" default
            processed_events.sort(key=lambda x: x["start_time"], reverse=True)
    # "date_desc" default
    processed_events.sort(key=lambda x: x["start_time"], reverse=True)

    # Limit the number of events returned
    processed_events = processed_events[:limit]

@@ -647,7 +653,7 @@ def events_summary(params: EventsSummaryQueryParams = Depends()):
    return JSONResponse(content=[e for e in groups.dicts()])


@router.get("/events/{event_id}")
@router.get("/events/{event_id}", response_model=EventResponse)
def event(event_id: str):
    try:
        return model_to_dict(Event.get(Event.id == event_id))

@@ -655,7 +661,7 @@ def event(event_id: str):
        return JSONResponse(content="Event not found", status_code=404)


@router.post("/events/{event_id}/retain")
@router.post("/events/{event_id}/retain", response_model=GenericResponse)
def set_retain(event_id: str):
    try:
        event = Event.get(Event.id == event_id)

@@ -674,7 +680,7 @@ def set_retain(event_id: str):
    )


@router.post("/events/{event_id}/plus")
@router.post("/events/{event_id}/plus", response_model=EventUploadPlusResponse)
def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = None):
    if not request.app.frigate_config.plus_api.is_active():
        message = "PLUS_API_KEY environment variable is not set"

@@ -786,7 +792,7 @@ def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = None):
    )


@router.put("/events/{event_id}/false_positive")
@router.put("/events/{event_id}/false_positive", response_model=EventUploadPlusResponse)
def false_positive(request: Request, event_id: str):
    if not request.app.frigate_config.plus_api.is_active():
        message = "PLUS_API_KEY environment variable is not set"

@@ -875,7 +881,7 @@ def false_positive(request: Request, event_id: str):
    )


@router.delete("/events/{event_id}/retain")
@router.delete("/events/{event_id}/retain", response_model=GenericResponse)
def delete_retain(event_id: str):
    try:
        event = Event.get(Event.id == event_id)

@@ -894,7 +900,7 @@ def delete_retain(event_id: str):
    )


@router.post("/events/{event_id}/sub_label")
@router.post("/events/{event_id}/sub_label", response_model=GenericResponse)
def set_sub_label(
    request: Request,
    event_id: str,

@@ -946,7 +952,7 @@ def set_sub_label(
    )


@router.post("/events/{event_id}/description")
@router.post("/events/{event_id}/description", response_model=GenericResponse)
def set_description(
    request: Request,
    event_id: str,

@@ -993,7 +999,7 @@ def set_description(
    )


@router.put("/events/{event_id}/description/regenerate")
@router.put("/events/{event_id}/description/regenerate", response_model=GenericResponse)
def regenerate_description(
    request: Request, event_id: str, params: RegenerateQueryParameters = Depends()
):

@@ -1064,14 +1070,14 @@ def delete_single_event(event_id: str, request: Request) -> dict:
    return {"success": True, "message": f"Event {event_id} deleted"}


@router.delete("/events/{event_id}")
@router.delete("/events/{event_id}", response_model=GenericResponse)
def delete_event(request: Request, event_id: str):
    result = delete_single_event(event_id, request)
    status_code = 200 if result["success"] else 404
    return JSONResponse(content=result, status_code=status_code)


@router.delete("/events/")
@router.delete("/events/", response_model=EventMultiDeleteResponse)
def delete_events(request: Request, body: EventsDeleteBody):
    if not body.event_ids:
        return JSONResponse(

@@ -1097,7 +1103,7 @@ def delete_events(request: Request, body: EventsDeleteBody):
    return JSONResponse(content=response, status_code=200)


@router.post("/events/{camera_name}/{label}/create")
@router.post("/events/{camera_name}/{label}/create", response_model=EventCreateResponse)
def create_event(
    request: Request,
    camera_name: str,

@@ -1153,7 +1159,7 @@ def create_event(
    )


@router.put("/events/{event_id}/end")
@router.put("/events/{event_id}/end", response_model=GenericResponse)
def end_event(request: Request, event_id: str, body: EventsEndBody):
    try:
        end_time = body.end_time or datetime.datetime.now().timestamp()
@@ -9,6 +9,7 @@ import psutil
from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse
from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict

from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.tags import Tags

@@ -207,3 +208,14 @@ def export_delete(event_id: str):
        ),
        status_code=200,
    )


@router.get("/exports/{export_id}")
def get_export(export_id: str):
    try:
        return JSONResponse(content=model_to_dict(Export.get(Export.id == export_id)))
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export not found"},
            status_code=404,
        )
@@ -87,7 +87,11 @@ def create_fastapi_app(
    logger.info("FastAPI started")

    # Rate limiter (used for login endpoint)
    auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit or "")
    if frigate_config.auth.failed_login_rate_limit is None:
        limiter.enabled = False
    else:
        auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit)

    app.state.limiter = limiter
    app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
    app.add_middleware(SlowAPIMiddleware)
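For context, the limit string consumed here comes from the auth section of the config; a sketch with illustrative values in slowapi's limit notation (multiple limits separated by semicolons):

```yaml
auth:
  failed_login_rate_limit: "1/second;5/minute;20/hour"
```

Omitting the option leaves `failed_login_rate_limit` as `None`, which the new branch above uses to disable the limiter entirely.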
@@ -20,7 +20,7 @@ from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name

from frigate.api.defs.media_query_parameters import (
from frigate.api.defs.query.media_query_parameters import (
    Extension,
    MediaEventsSnapshotQueryParams,
    MediaLatestFrameQueryParams,
@@ -12,20 +12,21 @@ from fastapi.responses import JSONResponse
from peewee import Case, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict

from frigate.api.defs.generic_response import GenericResponse
from frigate.api.defs.review_body import ReviewModifyMultipleBody
from frigate.api.defs.review_query_parameters import (
from frigate.api.defs.query.review_query_parameters import (
    ReviewActivityMotionQueryParams,
    ReviewQueryParams,
    ReviewSummaryQueryParams,
)
from frigate.api.defs.review_responses import (
from frigate.api.defs.request.review_body import ReviewModifyMultipleBody
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.response.review_response import (
    ReviewActivityMotionResponse,
    ReviewSegmentResponse,
    ReviewSummaryResponse,
)
from frigate.api.defs.tags import Tags
from frigate.models import Recordings, ReviewSegment
from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers

logger = logging.getLogger(__name__)

@@ -161,7 +162,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "alert"),
                        (ReviewSegment.severity == SeverityEnum.alert),
                        ReviewSegment.has_been_reviewed,
                    )
                ],

@@ -173,7 +174,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "detection"),
                        (ReviewSegment.severity == SeverityEnum.detection),
                        ReviewSegment.has_been_reviewed,
                    )
                ],

@@ -185,7 +186,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "alert"),
                        (ReviewSegment.severity == SeverityEnum.alert),
                        1,
                    )
                ],

@@ -197,7 +198,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "detection"),
                        (ReviewSegment.severity == SeverityEnum.detection),
                        1,
                    )
                ],

@@ -230,6 +231,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
        label_clause = reduce(operator.or_, label_clauses)
        clauses.append((label_clause))

    day_in_seconds = 60 * 60 * 24
    last_month = (
        ReviewSegment.select(
            fn.strftime(

@@ -246,7 +248,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "alert"),
                        (ReviewSegment.severity == SeverityEnum.alert),
                        ReviewSegment.has_been_reviewed,
                    )
                ],

@@ -258,7 +260,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "detection"),
                        (ReviewSegment.severity == SeverityEnum.detection),
                        ReviewSegment.has_been_reviewed,
                    )
                ],

@@ -270,7 +272,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "alert"),
                        (ReviewSegment.severity == SeverityEnum.alert),
                        1,
                    )
                ],

@@ -282,7 +284,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
                None,
                [
                    (
                        (ReviewSegment.severity == "detection"),
                        (ReviewSegment.severity == SeverityEnum.detection),
                        1,
                    )
                ],

@@ -292,7 +294,7 @@ def review_summary(params: ReviewSummaryQueryParams = Depends()):
        )
        .where(reduce(operator.and_, clauses))
        .group_by(
            (ReviewSegment.start_time + seconds_offset).cast("int") / (3600 * 24),
            (ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds,
        )
        .order_by(ReviewSegment.start_time.desc())
    )

@@ -362,7 +364,7 @@ def delete_reviews(body: ReviewModifyMultipleBody):
    ReviewSegment.delete().where(ReviewSegment.id << list_of_ids).execute()

    return JSONResponse(
        content=({"success": True, "message": "Delete reviews"}), status_code=200
        content=({"success": True, "message": "Deleted review items."}), status_code=200
    )
@@ -437,7 +437,7 @@ class FrigateApp:
        # pre-create shms
        for i in range(shm_frame_count):
            frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
            self.frame_manager.create(f"{config.name}_{i}", frame_size)
            self.frame_manager.create(f"{config.name}_frame{i}", frame_size)

        capture_process = util.Process(
            target=capture_camera,
@@ -38,6 +38,10 @@ class GenAICameraConfig(BaseModel):
        default_factory=list,
        title="List of required zones to be entered in order to run generative AI.",
    )
    debug_save_thumbnails: bool = Field(
        default=False,
        title="Save thumbnails sent to generative AI for debugging purposes.",
    )

    @field_validator("required_zones", mode="before")
    @classmethod
@@ -74,6 +74,7 @@ class OnvifConfig(FrigateBaseModel):
    port: int = Field(default=8000, title="Onvif Port")
    user: Optional[EnvString] = Field(default=None, title="Onvif Username")
    password: Optional[EnvString] = Field(default=None, title="Onvif Password")
    tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
    autotracking: PtzAutotrackConfig = Field(
        default_factory=PtzAutotrackConfig,
        title="PTZ auto tracking config.",
@@ -4,6 +4,7 @@ from typing import Optional
from pydantic import Field

from frigate.const import MAX_PRE_CAPTURE
from frigate.review.types import SeverityEnum

from ..base import FrigateBaseModel

@@ -101,3 +102,15 @@ class RecordConfig(FrigateBaseModel):
            self.alerts.pre_capture,
            self.detections.pre_capture,
        )

    def get_review_pre_capture(self, severity: SeverityEnum) -> int:
        if severity == SeverityEnum.alert:
            return self.alerts.pre_capture
        else:
            return self.detections.pre_capture

    def get_review_post_capture(self, severity: SeverityEnum) -> int:
        if severity == SeverityEnum.alert:
            return self.alerts.post_capture
        else:
            return self.detections.post_capture
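A usage sketch for the new helpers; the call site below is hypothetical and assumes `config` is a loaded `FrigateConfig` and the `review_*` variables come from a review segment:

```python
from frigate.review.types import SeverityEnum

# Pick clip padding based on the severity of the review item.
pre_seconds = config.record.get_review_pre_capture(SeverityEnum.alert)
post_seconds = config.record.get_review_post_capture(SeverityEnum.alert)
clip_start = review_start_time - pre_seconds  # review_start_time: hypothetical
clip_end = review_end_time + post_seconds     # review_end_time: hypothetical
```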
@@ -29,6 +29,7 @@ from frigate.util.builtin import (
|
||||
)
|
||||
from frigate.util.config import (
|
||||
StreamInfoRetriever,
|
||||
find_config_file,
|
||||
get_relative_coordinates,
|
||||
migrate_frigate_config,
|
||||
)
|
||||
@@ -67,7 +68,6 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
yaml = YAML()
|
||||
|
||||
DEFAULT_CONFIG_FILE = "/config/config.yml"
|
||||
DEFAULT_CONFIG = """
|
||||
mqtt:
|
||||
enabled: False
|
||||
@@ -638,16 +638,13 @@ class FrigateConfig(FrigateBaseModel):
|
||||
|
||||
@classmethod
|
||||
def load(cls, **kwargs):
|
||||
config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
|
||||
|
||||
if not os.path.isfile(config_path):
|
||||
config_path = config_path.replace("yml", "yaml")
|
||||
config_path = find_config_file()
|
||||
|
||||
# No configuration file found, create one.
|
||||
new_config = False
|
||||
if not os.path.isfile(config_path):
|
||||
logger.info("No config file found, saving default config")
|
||||
config_path = DEFAULT_CONFIG_FILE
|
||||
config_path = config_path
|
||||
new_config = True
|
||||
else:
|
||||
# Check if the config file needs to be migrated.

@@ -32,6 +32,7 @@ class DeepStack(DetectionApi):
        self.api_timeout = detector_config.api_timeout
        self.api_key = detector_config.api_key
        self.labels = detector_config.model.merged_labelmap
+        self.session = requests.Session()

    def get_label_index(self, label_value):
        if label_value.lower() == "truck":
@@ -51,7 +52,7 @@ class DeepStack(DetectionApi):
        data = {"api_key": self.api_key}

        try:
-            response = requests.post(
+            response = self.session.post(
                self.api_url,
                data=data,
                files={"image": image_bytes},

@@ -136,17 +136,17 @@ class Rknn(DetectionApi):
    def check_config(self, config):
        if (config.model.width != 320) or (config.model.height != 320):
            raise Exception(
-                "Make sure to set the model width and height to 320 in your config.yml."
+                "Make sure to set the model width and height to 320 in your config."
            )

        if config.model.input_pixel_format != "bgr":
            raise Exception(
-                'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
+                'Make sure to set the model input_pixel_format to "bgr" in your config.'
            )

        if config.model.input_tensor != "nhwc":
            raise Exception(
-                'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
+                'Make sure to set the model input_tensor to "nhwc" in your config.'
            )

    def detect_raw(self, tensor_input):

@@ -5,6 +5,7 @@ import logging
import os
import threading
from multiprocessing.synchronize import Event as MpEvent
+from pathlib import Path
from typing import Optional

import cv2
@@ -217,16 +218,47 @@ class EmbeddingMaintainer(threading.Thread):
                    _, buffer = cv2.imencode(".jpg", cropped_image)
                    snapshot_image = buffer.tobytes()

+                num_thumbnails = len(self.tracked_events.get(event_id, []))
+
                embed_image = (
                    [snapshot_image]
                    if event.has_snapshot and camera_config.genai.use_snapshot
                    else (
-                        [thumbnail for data in self.tracked_events[event_id]]
-                        if len(self.tracked_events.get(event_id, [])) > 0
+                        [
+                            data["thumbnail"]
+                            for data in self.tracked_events[event_id]
+                        ]
+                        if num_thumbnails > 0
                        else [thumbnail]
                    )
                )

+                if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
+                    logger.debug(
+                        f"Saving {num_thumbnails} thumbnails for event {event.id}"
+                    )
+
+                    Path(
+                        os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
+                    ).mkdir(parents=True, exist_ok=True)
+
+                    for idx, data in enumerate(self.tracked_events[event_id], 1):
+                        jpg_bytes: bytes = data["thumbnail"]
+
+                        if jpg_bytes is None:
+                            logger.warning(
+                                f"Unable to save thumbnail {idx} for {event.id}."
+                            )
+                        else:
+                            with open(
+                                os.path.join(
+                                    CLIPS_DIR,
+                                    f"genai-requests/{event.id}/{idx}.jpg",
+                                ),
+                                "wb",
+                            ) as j:
+                                j.write(jpg_bytes)
+
                # Generate the description. Call happens in a thread since it is network bound.
                threading.Thread(
                    target=self._embed_description,
@@ -325,18 +357,25 @@ class EmbeddingMaintainer(threading.Thread):
            )

        if event.has_snapshot and source == "snapshot":
-            with open(
-                os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
-                "rb",
-            ) as image_file:
+            snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
+
+            if not os.path.isfile(snapshot_file):
+                logger.error(
+                    f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}"
+                )
+                return
+
+            with open(snapshot_file, "rb") as image_file:
                snapshot_image = image_file.read()
                img = cv2.imdecode(
                    np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
                )

                # crop snapshot based on region before sending off to genai
+                # provide full image if region doesn't exist (manual events)
+                region = event.data.get("region", [0, 0, 1, 1])
                height, width = img.shape[:2]
-                x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
+                x1_rel, y1_rel, width_rel, height_rel = region

                x1, y1 = int(x1_rel * width), int(y1_rel * height)
                cropped_image = img[
@@ -350,7 +389,7 @@ class EmbeddingMaintainer(threading.Thread):
                    [snapshot_image]
                    if event.has_snapshot and source == "snapshot"
                    else (
-                        [thumbnail for data in self.tracked_events[event_id]]
+                        [data["thumbnail"] for data in self.tracked_events[event_id]]
                        if len(self.tracked_events.get(event_id, [])) > 0
                        else [thumbnail]
                    )
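
Note on the hunks above: beyond the new debug-save option, this fixes a real comprehension bug. The old `[thumbnail for data in self.tracked_events[event_id]]` looped over the tracked updates but never read `data`, so it repeated the single outer `thumbnail` variable N times; the fix collects each update's own bytes. A tiny self-contained demonstration:

```python
tracked_events = [
    {"thumbnail": b"frame-1"},
    {"thumbnail": b"frame-2"},
    {"thumbnail": b"frame-3"},
]
thumbnail = b"latest"

# old expression: iterates three times but never touches `data`
buggy = [thumbnail for data in tracked_events]
# fixed expression: one thumbnail per tracked update
fixed = [data["thumbnail"] for data in tracked_events]

print(buggy)  # [b'latest', b'latest', b'latest']
print(fixed)  # [b'frame-1', b'frame-2', b'frame-3']
```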

@@ -4,7 +4,6 @@ import datetime
import logging
import os
import threading
-from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path

@@ -16,11 +15,6 @@ from frigate.models import Event, Timeline
logger = logging.getLogger(__name__)


-class EventCleanupType(str, Enum):
-    clips = "clips"
-    snapshots = "snapshots"
-
-
CHUNK_SIZE = 50


@@ -67,19 +61,11 @@ class EventCleanup(threading.Thread):

        return self.camera_labels[camera]["labels"]

-    def expire(self, media_type: EventCleanupType) -> list[str]:
+    def expire_snapshots(self) -> list[str]:
        ## Expire events from unlisted cameras based on the global config
-        if media_type == EventCleanupType.clips:
-            expire_days = max(
-                self.config.record.alerts.retain.days,
-                self.config.record.detections.retain.days,
-            )
-            file_extension = None  # mp4 clips are no longer stored in /clips
-            update_params = {"has_clip": False}
-        else:
-            retain_config = self.config.snapshots.retain
-            file_extension = "jpg"
-            update_params = {"has_snapshot": False}
+        retain_config = self.config.snapshots.retain
+        file_extension = "jpg"
+        update_params = {"has_snapshot": False}

        distinct_labels = self.get_removed_camera_labels()

@@ -87,10 +73,7 @@ class EventCleanup(threading.Thread):
        # loop over object types in db
        for event in distinct_labels:
            # get expiration time for this label
-            if media_type == EventCleanupType.snapshots:
-                expire_days = retain_config.objects.get(
-                    event.label, retain_config.default
-                )
+            expire_days = retain_config.objects.get(event.label, retain_config.default)

            expire_after = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
@@ -138,8 +121,8 @@ class EventCleanup(threading.Thread):

            events_to_update = []

-            for batch in query.iterator():
-                events_to_update.extend([event.id for event in batch])
+            for event in query.iterator():
+                events_to_update.append(event.id)
                if len(events_to_update) >= CHUNK_SIZE:
                    logger.debug(
                        f"Updating {update_params} for {len(events_to_update)} events"
@@ -162,13 +145,7 @@ class EventCleanup(threading.Thread):

        ## Expire events from cameras based on the camera config
        for name, camera in self.config.cameras.items():
-            if media_type == EventCleanupType.clips:
-                expire_days = max(
-                    camera.record.alerts.retain.days,
-                    camera.record.detections.retain.days,
-                )
-            else:
-                retain_config = camera.snapshots.retain
+            retain_config = camera.snapshots.retain

            # get distinct objects in database for this camera
            distinct_labels = self.get_camera_labels(name)
@@ -176,10 +153,9 @@ class EventCleanup(threading.Thread):
            # loop over object types in db
            for event in distinct_labels:
                # get expiration time for this label
-                if media_type == EventCleanupType.snapshots:
-                    expire_days = retain_config.objects.get(
-                        event.label, retain_config.default
-                    )
+                expire_days = retain_config.objects.get(
+                    event.label, retain_config.default
+                )

                expire_after = (
                    datetime.datetime.now() - datetime.timedelta(days=expire_days)
@@ -206,19 +182,144 @@ class EventCleanup(threading.Thread):
                for event in expired_events:
                    events_to_update.append(event.id)

-                    if media_type == EventCleanupType.snapshots:
-                        try:
-                            media_name = f"{event.camera}-{event.id}"
-                            media_path = Path(
-                                f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
-                            )
-                            media_path.unlink(missing_ok=True)
-                            media_path = Path(
-                                f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
-                            )
-                            media_path.unlink(missing_ok=True)
-                        except OSError as e:
-                            logger.warning(f"Unable to delete event images: {e}")
+                    try:
+                        media_name = f"{event.camera}-{event.id}"
+                        media_path = Path(
+                            f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
+                        )
+                        media_path.unlink(missing_ok=True)
+                        media_path = Path(
+                            f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+                        )
+                        media_path.unlink(missing_ok=True)
+                    except OSError as e:
+                        logger.warning(f"Unable to delete event images: {e}")

+        # update the clips attribute for the db entry
+        for i in range(0, len(events_to_update), CHUNK_SIZE):
+            batch = events_to_update[i : i + CHUNK_SIZE]
+            logger.debug(f"Updating {update_params} for {len(batch)} events")
+            Event.update(update_params).where(Event.id << batch).execute()
+
+        return events_to_update
+
+    def expire_clips(self) -> list[str]:
+        ## Expire events from unlisted cameras based on the global config
+        expire_days = max(
+            self.config.record.alerts.retain.days,
+            self.config.record.detections.retain.days,
+        )
+        file_extension = None  # mp4 clips are no longer stored in /clips
+        update_params = {"has_clip": False}
+
+        # get expiration time for this label
+
+        expire_after = (
+            datetime.datetime.now() - datetime.timedelta(days=expire_days)
+        ).timestamp()
+        # grab all events after specific time
+        expired_events: list[Event] = (
+            Event.select(
+                Event.id,
+                Event.camera,
+            )
+            .where(
+                Event.camera.not_in(self.camera_keys),
+                Event.start_time < expire_after,
+                Event.retain_indefinitely == False,
+            )
+            .namedtuples()
+            .iterator()
+        )
+        logger.debug(f"{len(list(expired_events))} events can be expired")
+        # delete the media from disk
+        for expired in expired_events:
+            media_name = f"{expired.camera}-{expired.id}"
+            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
+
+            try:
+                media_path.unlink(missing_ok=True)
+                if file_extension == "jpg":
+                    media_path = Path(
+                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+                    )
+                    media_path.unlink(missing_ok=True)
+            except OSError as e:
+                logger.warning(f"Unable to delete event images: {e}")
+
+        # update the clips attribute for the db entry
+        query = Event.select(Event.id).where(
+            Event.camera.not_in(self.camera_keys),
+            Event.start_time < expire_after,
+            Event.retain_indefinitely == False,
+        )
+
+        events_to_update = []
+
+        for event in query.iterator():
+            events_to_update.append(event.id)
+
+            if len(events_to_update) >= CHUNK_SIZE:
+                logger.debug(
+                    f"Updating {update_params} for {len(events_to_update)} events"
+                )
+                Event.update(update_params).where(
+                    Event.id << events_to_update
+                ).execute()
+                events_to_update = []
+
+        # Update any remaining events
+        if events_to_update:
+            logger.debug(
+                f"Updating clips/snapshots attribute for {len(events_to_update)} events"
+            )
+            Event.update(update_params).where(Event.id << events_to_update).execute()
+
+        events_to_update = []
+        now = datetime.datetime.now()
+
+        ## Expire events from cameras based on the camera config
+        for name, camera in self.config.cameras.items():
+            expire_days = max(
+                camera.record.alerts.retain.days,
+                camera.record.detections.retain.days,
+            )
+            alert_expire_date = (
+                now - datetime.timedelta(days=camera.record.alerts.retain.days)
+            ).timestamp()
+            detection_expire_date = (
+                now - datetime.timedelta(days=camera.record.detections.retain.days)
+            ).timestamp()
+            # grab all events after specific time
+            expired_events = (
+                Event.select(
+                    Event.id,
+                    Event.camera,
+                )
+                .where(
+                    Event.camera == name,
+                    Event.retain_indefinitely == False,
+                    (
+                        (
+                            (Event.data["max_severity"] != "detection")
+                            | (Event.data["max_severity"].is_null())
+                        )
+                        & (Event.end_time < alert_expire_date)
+                    )
+                    | (
+                        (Event.data["max_severity"] == "detection")
+                        & (Event.end_time < detection_expire_date)
+                    ),
+                )
+                .namedtuples()
+                .iterator()
+            )
+
+            # delete the grabbed clips from disk
+            # only snapshots are stored in /clips
+            # so no need to delete mp4 files
+            for event in expired_events:
+                events_to_update.append(event.id)

        # update the clips attribute for the db entry
        for i in range(0, len(events_to_update), CHUNK_SIZE):
@@ -231,7 +332,7 @@ class EventCleanup(threading.Thread):
    def run(self) -> None:
        # only expire events every 5 minutes
        while not self.stop_event.wait(300):
-            events_with_expired_clips = self.expire(EventCleanupType.clips)
+            events_with_expired_clips = self.expire_clips()

            # delete timeline entries for events that have expired recordings
            # delete up to 100,000 at a time
@@ -242,7 +343,7 @@ class EventCleanup(threading.Thread):
                    Timeline.source_id << deleted_events_list[i : i + max_deletes]
                ).execute()

-            self.expire(EventCleanupType.snapshots)
+            self.expire_snapshots()

            # drop events from db where has_clip and has_snapshot are false
            events = (
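
Note on the hunks above: splitting `expire()` into `expire_snapshots()` and `expire_clips()` removes the `media_type` branching, and both paths now update the database in `CHUNK_SIZE` batches so a single `UPDATE ... WHERE id IN (...)` never carries thousands of ids. A minimal sketch of that chunking pattern (the callback is a stand-in for `Event.update(...).where(Event.id << batch).execute()`):

```python
CHUNK_SIZE = 50


def update_in_chunks(ids: list[str], apply_update) -> None:
    """Apply an update callback to at most CHUNK_SIZE ids at a time."""
    for i in range(0, len(ids), CHUNK_SIZE):
        batch = ids[i : i + CHUNK_SIZE]
        apply_update(batch)


# toy callback standing in for the peewee bulk update
update_in_chunks(
    [f"event-{n}" for n in range(120)],
    lambda batch: print(f"updating {len(batch)} rows"),
)
# prints: updating 50 rows / updating 50 rows / updating 20 rows
```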

@@ -82,18 +82,23 @@ class EventProcessor(threading.Thread):
                )

            if source_type == EventTypeEnum.tracked_object:
+                id = event_data["id"]
                self.timeline_queue.put(
                    (
                        camera,
                        source_type,
                        event_type,
-                        self.events_in_process.get(event_data["id"]),
+                        self.events_in_process.get(id),
                        event_data,
                    )
                )

-                if event_type == EventStateEnum.start:
-                    self.events_in_process[event_data["id"]] = event_data
+                # if this is the first message, just store it and continue, its not time to insert it in the db
+                if (
+                    event_type == EventStateEnum.start
+                    or id not in self.events_in_process
+                ):
+                    self.events_in_process[id] = event_data
+                    continue

                self.handle_object_detection(event_type, camera, event_data)
@@ -123,10 +128,6 @@ class EventProcessor(threading.Thread):
        """handle tracked object event updates."""
        updated_db = False

-        # if this is the first message, just store it and continue, its not time to insert it in the db
-        if event_type == EventStateEnum.start:
-            self.events_in_process[event_data["id"]] = event_data
-
        if should_update_db(self.events_in_process[event_data["id"]], event_data):
            updated_db = True
            camera_config = self.config.cameras[camera]
@@ -210,6 +211,7 @@ class EventProcessor(threading.Thread):
                    "top_score": event_data["top_score"],
                    "attributes": attributes,
                    "type": "object",
+                    "max_severity": event_data.get("max_severity"),
                },
            }

@@ -38,6 +38,11 @@ class OllamaClient(GenAIClient):

    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to Ollama"""
+        if self.provider is None:
+            logger.warning(
+                "Ollama provider has not been initialized, a description will not be generated. Check your Ollama configuration."
+            )
+            return None
        try:
            result = self.provider.generate(
                self.genai_config.model,

@@ -702,30 +702,7 @@ class TrackedObjectProcessor(threading.Thread):
            return False

        # If the object is not considered an alert or detection
-        review_config = self.config.cameras[camera].review
-        if not (
-            (
-                obj.obj_data["label"] in review_config.alerts.labels
-                and (
-                    not review_config.alerts.required_zones
-                    or set(obj.entered_zones) & set(review_config.alerts.required_zones)
-                )
-            )
-            or (
-                (
-                    not review_config.detections.labels
-                    or obj.obj_data["label"] in review_config.detections.labels
-                )
-                and (
-                    not review_config.detections.required_zones
-                    or set(obj.entered_zones)
-                    & set(review_config.detections.required_zones)
-                )
-            )
-        ):
-            logger.debug(
-                f"Not creating clip for {obj.obj_data['id']} because it did not qualify as an alert or detection"
-            )
+        if obj.max_severity is None:
            return False

        return True
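
Note on the hunk above: the nested alert/detection qualification now lives behind the tracked object's `max_severity` property (added later in this compare), so callers reduce to a null check. A toy sketch of the property pattern, with assumed qualification rules rather than the real config-driven ones:

```python
from typing import Optional


class TrackedObjectSketch:
    """Toy model of the qualification logic, not the real TrackedObject."""

    def __init__(self, label: str, zones: list[str]):
        self.label = label
        self.zones = zones

    @property
    def max_severity(self) -> Optional[str]:
        # assumed rules: person in a required zone is an alert,
        # any person is at least a detection
        if self.label == "person" and "porch" in self.zones:
            return "alert"
        if self.label == "person":
            return "detection"
        return None


obj = TrackedObjectSketch("person", ["yard"])
if obj.max_severity is None:
    print("no clip")
else:
    print(f"clip qualifies as {obj.max_severity}")
```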

@@ -2,7 +2,6 @@

import copy
import logging
import os
-import queue
import threading
import time
@@ -29,11 +28,11 @@ from frigate.const import (
    AUTOTRACKING_ZOOM_EDGE_THRESHOLD,
    AUTOTRACKING_ZOOM_IN_HYSTERESIS,
    AUTOTRACKING_ZOOM_OUT_HYSTERESIS,
-    CONFIG_DIR,
)
from frigate.ptz.onvif import OnvifController
from frigate.track.tracked_object import TrackedObject
from frigate.util.builtin import update_yaml_file
+from frigate.util.config import find_config_file
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union

logger = logging.getLogger(__name__)
@@ -328,13 +327,7 @@ class PtzAutoTracker:
            self.autotracker_init[camera] = True

    def _write_config(self, camera):
-        config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")
-
-        # Check if we can use .yaml instead of .yml
-        config_file_yaml = config_file.replace(".yml", ".yaml")
-
-        if os.path.isfile(config_file_yaml):
-            config_file = config_file_yaml
+        config_file = find_config_file()

        logger.debug(
            f"{camera}: Writing new config with autotracker motion coefficients: {self.config.cameras[camera].onvif.autotracking.movement_weights}"

@@ -6,6 +6,7 @@ from importlib.util import find_spec
from pathlib import Path

import numpy
+import requests
from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import Transport
@@ -48,7 +49,11 @@ class OnvifController:

        if cam.onvif.host:
            try:
-                transport = Transport(timeout=10, operation_timeout=10)
+                session = requests.Session()
+                session.verify = not cam.onvif.tls_insecure
+                transport = Transport(
+                    timeout=10, operation_timeout=10, session=session
+                )
                self.cams[cam_name] = {
                    "onvif": ONVIFCamera(
                        cam.onvif.host,
@@ -558,22 +563,26 @@ class OnvifController:
        if not self._init_onvif(camera_name):
            return

-        if command == OnvifCommandEnum.init:
-            # already init
-            return
-        elif command == OnvifCommandEnum.stop:
-            self._stop(camera_name)
-        elif command == OnvifCommandEnum.preset:
-            self._move_to_preset(camera_name, param)
-        elif command == OnvifCommandEnum.move_relative:
-            _, pan, tilt = param.split("_")
-            self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
-        elif (
-            command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out
-        ):
-            self._zoom(camera_name, command)
-        else:
-            self._move(camera_name, command)
+        try:
+            if command == OnvifCommandEnum.init:
+                # already init
+                return
+            elif command == OnvifCommandEnum.stop:
+                self._stop(camera_name)
+            elif command == OnvifCommandEnum.preset:
+                self._move_to_preset(camera_name, param)
+            elif command == OnvifCommandEnum.move_relative:
+                _, pan, tilt = param.split("_")
+                self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
+            elif (
+                command == OnvifCommandEnum.zoom_in
+                or command == OnvifCommandEnum.zoom_out
+            ):
+                self._zoom(camera_name, command)
+            else:
+                self._move(camera_name, command)
+        except ONVIFError as e:
+            logger.error(f"Unable to handle onvif command: {e}")

    def get_camera_info(self, camera_name: str) -> dict[str, any]:
        if camera_name not in self.cams.keys():
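
Note on the hunks above: the new `tls_insecure` option flows into a `requests.Session` whose `verify` flag zeep's `Transport` reuses, which is what lets cameras with self-signed certificates work over HTTPS. A minimal sketch of the session wiring (the zeep call is shown as a comment so the snippet stays self-contained):

```python
import requests

tls_insecure = True  # would come from cam.onvif.tls_insecure in the real config

session = requests.Session()
# verify=False skips certificate validation for self-signed camera certs
session.verify = not tls_insecure

# the session is then handed to zeep's Transport, roughly:
#   transport = Transport(timeout=10, operation_timeout=10, session=session)
print(session.verify)  # False
```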

@@ -29,6 +29,7 @@ from frigate.const import (
    RECORD_DIR,
)
from frigate.models import Recordings, ReviewSegment
+from frigate.review.types import SeverityEnum
from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)
@@ -194,6 +195,7 @@ class RecordingMaintainer(threading.Thread):
                ReviewSegment.select(
                    ReviewSegment.start_time,
                    ReviewSegment.end_time,
+                    ReviewSegment.severity,
                    ReviewSegment.data,
                )
                .where(
@@ -219,11 +221,15 @@ class RecordingMaintainer(threading.Thread):
                [r for r in recordings_to_insert if r is not None],
            )

+    def drop_segment(self, cache_path: str) -> None:
+        Path(cache_path).unlink(missing_ok=True)
+        self.end_time_cache.pop(cache_path, None)
+
    async def validate_and_move_segment(
        self, camera: str, reviews: list[ReviewSegment], recording: dict[str, any]
    ) -> None:
-        cache_path = recording["cache_path"]
-        start_time = recording["start_time"]
+        cache_path: str = recording["cache_path"]
+        start_time: datetime.datetime = recording["start_time"]
+        record_config = self.config.cameras[camera].record

        # Just delete files if recordings are turned off
@@ -231,8 +237,7 @@ class RecordingMaintainer(threading.Thread):
            camera not in self.config.cameras
            or not self.config.cameras[camera].record.enabled
        ):
-            Path(cache_path).unlink(missing_ok=True)
-            self.end_time_cache.pop(cache_path, None)
+            self.drop_segment(cache_path)
            return

        if cache_path in self.end_time_cache:
@@ -260,24 +265,34 @@ class RecordingMaintainer(threading.Thread):
            return

        # if cached file's start_time is earlier than the retain days for the camera
        # meaning continuous recording is not enabled
        if start_time <= (
            datetime.datetime.now().astimezone(datetime.timezone.utc)
            - datetime.timedelta(days=self.config.cameras[camera].record.retain.days)
        ):
-            # if the cached segment overlaps with the events:
+            # if the cached segment overlaps with the review items:
            overlaps = False
            for review in reviews:
-                # if the event starts in the future, stop checking events
+                severity = SeverityEnum[review.severity]
+
+                # if the review item starts in the future, stop checking review items
                # and remove this segment
-                if review.start_time > end_time.timestamp():
+                if (
+                    review.start_time - record_config.get_review_pre_capture(severity)
+                ) > end_time.timestamp():
                    overlaps = False
                    Path(cache_path).unlink(missing_ok=True)
                    self.end_time_cache.pop(cache_path, None)
                    break

-                # if the event is in progress or ends after the recording starts, keep it
-                # and stop looking at events
-                if review.end_time is None or review.end_time >= start_time.timestamp():
+                # if the review item is in progress or ends after the recording starts, keep it
+                # and stop looking at review items
+                if (
+                    review.end_time is None
+                    or (
+                        review.end_time
+                        + record_config.get_review_post_capture(severity)
+                    )
+                    >= start_time.timestamp()
+                ):
                    overlaps = True
                    break

@@ -296,7 +311,7 @@ class RecordingMaintainer(threading.Thread):
                    cache_path,
                    record_mode,
                )
-            # if it doesn't overlap with an event, go ahead and drop the segment
+            # if it doesn't overlap with an review item, go ahead and drop the segment
            # if it ends more than the configured pre_capture for the camera
            else:
                camera_info = self.object_recordings_info[camera]
@@ -307,9 +322,9 @@ class RecordingMaintainer(threading.Thread):
                    most_recently_processed_frame_time - record_config.event_pre_capture
                ).astimezone(datetime.timezone.utc)
                if end_time < retain_cutoff:
-                    Path(cache_path).unlink(missing_ok=True)
-                    self.end_time_cache.pop(cache_path, None)
+                    self.drop_segment(cache_path)
        # else retain days includes this segment
        # meaning continuous recording is enabled
        else:
            # assume that empty means the relevant recording info has not been received yet
            camera_info = self.object_recordings_info[camera]
@@ -390,8 +405,7 @@ class RecordingMaintainer(threading.Thread):

        # check if the segment shouldn't be stored
        if segment_info.should_discard_segment(store_mode):
-            Path(cache_path).unlink(missing_ok=True)
-            self.end_time_cache.pop(cache_path, None)
+            self.drop_segment(cache_path)
            return

        # directory will be in utc due to start_time being in utc
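
Note on the hunks above: a cached segment is now kept when it falls inside the review item's window widened by the severity-specific pre/post capture, rather than the item's raw start/end. A worked sketch of that arithmetic with toy numbers:

```python
# Toy numbers: a review item from t=100 to t=160 with 10s pre / 20s post capture
review_start, review_end = 100.0, 160.0
pre_capture, post_capture = 10.0, 20.0

segment_start, segment_end = 85.0, 95.0  # cached recording segment

# segment is only dropped early if the widened window starts after it ends
starts_after_segment = (review_start - pre_capture) > segment_end
# segment overlaps if the widened window ends at or after the segment start
overlaps = review_end + post_capture >= segment_start

print(starts_after_segment)  # False: 90 is not > 95
print(overlaps)  # True: 180 >= 85, so the segment is kept
```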

@@ -7,7 +7,6 @@ import random
import string
import sys
import threading
-from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from typing import Optional
@@ -27,6 +26,7 @@ from frigate.const import (
from frigate.events.external import ManualEventState
from frigate.models import ReviewSegment
from frigate.object_processing import TrackedObject
+from frigate.review.types import SeverityEnum
from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop

logger = logging.getLogger(__name__)
@@ -39,11 +39,6 @@ THRESHOLD_ALERT_ACTIVITY = 120
THRESHOLD_DETECTION_ACTIVITY = 30


-class SeverityEnum(str, Enum):
-    alert = "alert"
-    detection = "detection"
-
-
class PendingReviewSegment:
    def __init__(
        self,

frigate/review/types.py (new file)
@@ -0,0 +1,6 @@
+from enum import Enum
+
+
+class SeverityEnum(str, Enum):
+    alert = "alert"
+    detection = "detection"

@@ -293,7 +293,7 @@ def stats_snapshot(
    for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
        try:
            storage_stats = shutil.disk_usage(path)
-        except FileNotFoundError:
+        except (FileNotFoundError, OSError):
            stats["service"]["storage"][path] = {}
            continue

@@ -17,6 +17,8 @@ bandwidth_equation = Recordings.segment_size / (
    Recordings.end_time - Recordings.start_time
)

+MAX_CALCULATED_BANDWIDTH = 10000  # 10Gb/hr
+

class StorageMaintainer(threading.Thread):
    """Maintain frigates recording storage."""
@@ -52,6 +54,12 @@ class StorageMaintainer(threading.Thread):
                    * 3600,
                    2,
                )
+
+                if bandwidth > MAX_CALCULATED_BANDWIDTH:
+                    logger.warning(
+                        f"{camera} has a bandwidth of {bandwidth} MB/hr which exceeds the expected maximum. This typically indicates an issue with the cameras recordings."
+                    )
+                    bandwidth = MAX_CALCULATED_BANDWIDTH
            except TypeError:
                bandwidth = 0
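
Note on the hunk above: bandwidth is derived from `segment_size / duration`, so a corrupt segment with a near-zero duration can make the estimate explode and skew the storage math; clamping caps the damage. A minimal sketch of the capped calculation:

```python
MAX_CALCULATED_BANDWIDTH = 10000  # 10Gb/hr, mirroring the constant above


def hourly_bandwidth(segment_size_mb: float, duration_s: float) -> float:
    """MB recorded per hour, capped so one bad segment can't skew storage math."""
    bandwidth = round(segment_size_mb / duration_s * 3600, 2)
    return min(bandwidth, MAX_CALCULATED_BANDWIDTH)


print(hourly_bandwidth(10, 10))     # 3600.0 MB/hr
print(hourly_bandwidth(10, 0.001))  # clamped to 10000
```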

@@ -9,8 +9,8 @@ from playhouse.sqliteq import SqliteQueueDatabase

from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig
-from frigate.models import Event, ReviewSegment
-from frigate.review.maintainer import SeverityEnum
+from frigate.models import Event, Recordings, ReviewSegment
+from frigate.review.types import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS


@@ -146,17 +146,35 @@ class BaseTestHttp(unittest.TestCase):
    def insert_mock_review_segment(
        self,
        id: str,
-        start_time: datetime.datetime = datetime.datetime.now().timestamp(),
-        end_time: datetime.datetime = datetime.datetime.now().timestamp() + 20,
+        start_time: float = datetime.datetime.now().timestamp(),
+        end_time: float = datetime.datetime.now().timestamp() + 20,
+        severity: SeverityEnum = SeverityEnum.alert,
+        has_been_reviewed: bool = False,
    ) -> Event:
-        """Inserts a basic event model with a given id."""
+        """Inserts a review segment model with a given id."""
        return ReviewSegment.insert(
            id=id,
            camera="front_door",
            start_time=start_time,
            end_time=end_time,
-            has_been_reviewed=False,
-            severity=SeverityEnum.alert,
+            has_been_reviewed=has_been_reviewed,
+            severity=severity,
            thumb_path=False,
            data={},
        ).execute()

+    def insert_mock_recording(
+        self,
+        id: str,
+        start_time: float = datetime.datetime.now().timestamp(),
+        end_time: float = datetime.datetime.now().timestamp() + 20,
+    ) -> Event:
+        """Inserts a recording model with a given id."""
+        return Recordings.insert(
+            id=id,
+            path=id,
+            camera="front_door",
+            start_time=start_time,
+            end_time=end_time,
+            duration=end_time - start_time,
+        ).execute()

@@ -1,76 +1,89 @@
-import datetime
+from datetime import datetime, timedelta

from fastapi.testclient import TestClient

-from frigate.models import Event, ReviewSegment
+from frigate.models import Event, Recordings, ReviewSegment
+from frigate.review.types import SeverityEnum
from frigate.test.http_api.base_http_test import BaseTestHttp


class TestHttpReview(BaseTestHttp):
    def setUp(self):
-        super().setUp([Event, ReviewSegment])
+        super().setUp([Event, Recordings, ReviewSegment])
+        self.app = super().create_app()
+
+    def _get_reviews(self, ids: list[str]):
+        return list(
+            ReviewSegment.select(ReviewSegment.id)
+            .where(ReviewSegment.id.in_(ids))
+            .execute()
+        )
+
+    def _get_recordings(self, ids: list[str]):
+        return list(
+            Recordings.select(Recordings.id).where(Recordings.id.in_(ids)).execute()
+        )

    ####################################################################################################################
    ###################################  GET /review Endpoint   #######################################################
    ####################################################################################################################

    # Does not return any data point since the end time (before parameter) is not passed and the review segment end_time is 2 seconds from now
    def test_get_review_no_filters_no_matches(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random", now, now + 2)
-            reviews_response = client.get("/review")
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 0
+            response = client.get("/review")
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 0

    def test_get_review_no_filters(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random", now - 2, now - 1)
-            reviews_response = client.get("/review")
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 1
+            response = client.get("/review")
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 1

    def test_get_review_with_time_filter_no_matches(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
                "after": now,
                "before": now + 3,
            }
-            reviews_response = client.get("/review", params=params)
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 0
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 0

    def test_get_review_with_time_filter(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
                "after": now - 1,
                "before": now + 3,
            }
-            reviews_response = client.get("/review", params=params)
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 1
-            assert reviews_in_response[0]["id"] == id
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 1
+            assert response_json[0]["id"] == id

    def test_get_review_with_limit_filter(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            id = "123456.random"
            id2 = "654321.random"
            super().insert_mock_review_segment(id, now, now + 2)
@@ -80,17 +93,49 @@ class TestHttpReview(BaseTestHttp):
                "after": now,
                "before": now + 3,
            }
-            reviews_response = client.get("/review", params=params)
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 1
-            assert reviews_in_response[0]["id"] == id2
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 1
+            assert response_json[0]["id"] == id2

+    def test_get_review_with_severity_filters_no_matches(self):
+        now = datetime.now().timestamp()
+
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id, now, now + 2, SeverityEnum.detection)
+            params = {
+                "severity": "detection",
+                "after": now - 1,
+                "before": now + 3,
+            }
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 1
+            assert response_json[0]["id"] == id
+
+    def test_get_review_with_severity_filters(self):
+        now = datetime.now().timestamp()
+
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id, now, now + 2, SeverityEnum.detection)
+            params = {
+                "severity": "alert",
+                "after": now - 1,
+                "before": now + 3,
+            }
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 0
+
    def test_get_review_with_all_filters(self):
-        app = super().create_app()
-        now = datetime.datetime.now().timestamp()
+        now = datetime.now().timestamp()

-        with TestClient(app) as client:
+        with TestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
@@ -103,8 +148,424 @@ class TestHttpReview(BaseTestHttp):
                "after": now - 1,
                "before": now + 3,
            }
-            reviews_response = client.get("/review", params=params)
-            assert reviews_response.status_code == 200
-            reviews_in_response = reviews_response.json()
-            assert len(reviews_in_response) == 1
-            assert reviews_in_response[0]["id"] == id
+            response = client.get("/review", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert len(response_json) == 1
+            assert response_json[0]["id"] == id
+
+    ####################################################################################################################
+    ###################################  GET /review/summary Endpoint   ###############################################
+    ####################################################################################################################
+    def test_get_review_summary_all_filters(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            params = {
+                "cameras": "front_door",
+                "labels": "all",
+                "zones": "all",
+                "timezone": "utc",
+            }
+            response = client.get("/review/summary", params=params)
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-24'
+            today_formatted = datetime.today().strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                today_formatted: {
+                    "day": today_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    def test_get_review_summary_no_filters(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            response = client.get("/review/summary")
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-24'
+            today_formatted = datetime.today().strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                today_formatted: {
+                    "day": today_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    def test_get_review_summary_multiple_days(self):
+        now = datetime.now()
+        five_days_ago = datetime.today() - timedelta(days=5)
+
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment(
+                "123456.random", now.timestamp() - 2, now.timestamp() - 1
+            )
+            super().insert_mock_review_segment(
+                "654321.random",
+                five_days_ago.timestamp(),
+                five_days_ago.timestamp() + 1,
+            )
+            response = client.get("/review/summary")
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-24'
+            today_formatted = now.strftime("%Y-%m-%d")
+            # e.g. '2024-11-19'
+            five_days_ago_formatted = five_days_ago.strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                today_formatted: {
+                    "day": today_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                five_days_ago_formatted: {
+                    "day": five_days_ago_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    def test_get_review_summary_multiple_days_edge_cases(self):
+        now = datetime.now()
+        five_days_ago = datetime.today() - timedelta(days=5)
+        twenty_days_ago = datetime.today() - timedelta(days=20)
+        one_month_ago = datetime.today() - timedelta(days=30)
+        one_month_ago_ts = one_month_ago.timestamp()
+
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random", now.timestamp())
+            super().insert_mock_review_segment(
+                "123457.random", five_days_ago.timestamp()
+            )
+            super().insert_mock_review_segment(
+                "123458.random",
+                twenty_days_ago.timestamp(),
+                None,
+                SeverityEnum.detection,
+            )
+            # One month ago plus 5 seconds fits within the condition (review.start_time > month_ago). Assuming that the endpoint does not take more than 5 seconds to be invoked
+            super().insert_mock_review_segment(
+                "123459.random",
+                one_month_ago_ts + 5,
+                None,
+                SeverityEnum.detection,
+            )
+            # This won't appear in the output since it's not within last month start_time clause (review.start_time > month_ago)
+            super().insert_mock_review_segment("123450.random", one_month_ago_ts)
+            response = client.get("/review/summary")
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-24'
+            today_formatted = now.strftime("%Y-%m-%d")
+            # e.g. '2024-11-19'
+            five_days_ago_formatted = five_days_ago.strftime("%Y-%m-%d")
+            # e.g. '2024-11-04'
+            twenty_days_ago_formatted = twenty_days_ago.strftime("%Y-%m-%d")
+            # e.g. '2024-10-24'
+            one_month_ago_formatted = one_month_ago.strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                today_formatted: {
+                    "day": today_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                five_days_ago_formatted: {
+                    "day": five_days_ago_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                twenty_days_ago_formatted: {
+                    "day": twenty_days_ago_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 0,
+                    "total_detection": 1,
+                },
+                one_month_ago_formatted: {
+                    "day": one_month_ago_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 0,
+                    "total_detection": 1,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    def test_get_review_summary_multiple_in_same_day(self):
+        now = datetime.now()
+        five_days_ago = datetime.today() - timedelta(days=5)
+
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random", now.timestamp())
+            five_days_ago_ts = five_days_ago.timestamp()
+            for i in range(20):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_alert",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.alert,
+                )
+            for i in range(15):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_detection",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.detection,
+                )
+            response = client.get("/review/summary")
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-24'
+            today_formatted = now.strftime("%Y-%m-%d")
+            # e.g. '2024-11-19'
+            five_days_ago_formatted = five_days_ago.strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                today_formatted: {
+                    "day": today_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 1,
+                    "total_detection": 0,
+                },
+                five_days_ago_formatted: {
+                    "day": five_days_ago_formatted,
+                    "reviewed_alert": 0,
+                    "reviewed_detection": 0,
+                    "total_alert": 20,
+                    "total_detection": 15,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    def test_get_review_summary_multiple_in_same_day_with_reviewed(self):
+        five_days_ago = datetime.today() - timedelta(days=5)
+
+        with TestClient(self.app) as client:
+            five_days_ago_ts = five_days_ago.timestamp()
+            for i in range(10):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_alert_not_reviewed",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.alert,
+                    False,
+                )
+            for i in range(10):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_alert_reviewed",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.alert,
+                    True,
+                )
+            for i in range(10):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_detection_not_reviewed",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.detection,
+                    False,
+                )
+            for i in range(5):
+                super().insert_mock_review_segment(
+                    f"123456_{i}.random_detection_reviewed",
+                    five_days_ago_ts,
+                    five_days_ago_ts,
+                    SeverityEnum.detection,
+                    True,
+                )
+            response = client.get("/review/summary")
+            assert response.status_code == 200
+            response_json = response.json()
+            # e.g. '2024-11-19'
+            five_days_ago_formatted = five_days_ago.strftime("%Y-%m-%d")
+            expected_response = {
+                "last24Hours": {
+                    "reviewed_alert": None,
+                    "reviewed_detection": None,
+                    "total_alert": None,
+                    "total_detection": None,
+                },
+                five_days_ago_formatted: {
+                    "day": five_days_ago_formatted,
+                    "reviewed_alert": 10,
+                    "reviewed_detection": 5,
+                    "total_alert": 20,
+                    "total_detection": 15,
+                },
+            }
+            self.assertEqual(response_json, expected_response)
+
+    ####################################################################################################################
+    ###################################  POST reviews/viewed Endpoint   ###############################################
+    ####################################################################################################################
+    def test_post_reviews_viewed_no_body(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            response = client.post("/reviews/viewed")
+            # Missing ids
+            assert response.status_code == 422
+
+    def test_post_reviews_viewed_no_body_ids(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            body = {"ids": [""]}
+            response = client.post("/reviews/viewed", json=body)
+            # Missing ids
+            assert response.status_code == 422
+
+    def test_post_reviews_viewed_non_existent_id(self):
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id)
+            body = {"ids": ["1"]}
+            response = client.post("/reviews/viewed", json=body)
+            assert response.status_code == 200
+            response = response.json()
+            assert response["success"] == True
+            assert response["message"] == "Reviewed multiple items"
+            # Verify that in DB the review segment was not changed
+            review_segment_in_db = (
+                ReviewSegment.select(ReviewSegment.has_been_reviewed)
+                .where(ReviewSegment.id == id)
+                .get()
+            )
+            assert review_segment_in_db.has_been_reviewed == False
+
+    def test_post_reviews_viewed(self):
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id)
+            body = {"ids": [id]}
+            response = client.post("/reviews/viewed", json=body)
+            assert response.status_code == 200
+            response = response.json()
+            assert response["success"] == True
+            assert response["message"] == "Reviewed multiple items"
+            # Verify that in DB the review segment was changed
+            review_segment_in_db = (
+                ReviewSegment.select(ReviewSegment.has_been_reviewed)
+                .where(ReviewSegment.id == id)
+                .get()
+            )
+            assert review_segment_in_db.has_been_reviewed == True
+
+    ####################################################################################################################
+    ###################################  POST reviews/delete Endpoint   ###############################################
+    ####################################################################################################################
+    def test_post_reviews_delete_no_body(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            response = client.post("/reviews/delete")
+            # Missing ids
+            assert response.status_code == 422
+
+    def test_post_reviews_delete_no_body_ids(self):
+        with TestClient(self.app) as client:
+            super().insert_mock_review_segment("123456.random")
+            body = {"ids": [""]}
+            response = client.post("/reviews/delete", json=body)
+            # Missing ids
+            assert response.status_code == 422
+
+    def test_post_reviews_delete_non_existent_id(self):
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id)
+            body = {"ids": ["1"]}
+            response = client.post("/reviews/delete", json=body)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert response_json["success"] == True
+            assert response_json["message"] == "Deleted review items."
+            # Verify that in DB the review segment was not deleted
+            review_ids_in_db_after = self._get_reviews([id])
+            assert len(review_ids_in_db_after) == 1
+            assert review_ids_in_db_after[0].id == id
+
+    def test_post_reviews_delete(self):
+        with TestClient(self.app) as client:
+            id = "123456.random"
+            super().insert_mock_review_segment(id)
+            body = {"ids": [id]}
+            response = client.post("/reviews/delete", json=body)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert response_json["success"] == True
+            assert response_json["message"] == "Deleted review items."
+            # Verify that in DB the review segment was deleted
+            review_ids_in_db_after = self._get_reviews([id])
+            assert len(review_ids_in_db_after) == 0
+
+    def test_post_reviews_delete_many(self):
+        with TestClient(self.app) as client:
+            ids = ["123456.random", "654321.random"]
+            for id in ids:
+                super().insert_mock_review_segment(id)
+                super().insert_mock_recording(id)
+
+            review_ids_in_db_before = self._get_reviews(ids)
+            recordings_ids_in_db_before = self._get_recordings(ids)
+            assert len(review_ids_in_db_before) == 2
+            assert len(recordings_ids_in_db_before) == 2
+
+            body = {"ids": ids}
+            response = client.post("/reviews/delete", json=body)
+            assert response.status_code == 200
+            response_json = response.json()
+            assert response_json["success"] == True
+            assert response_json["message"] == "Deleted review items."
+
+            # Verify that in DB all review segments and recordings that were passed were deleted
+            review_ids_in_db_after = self._get_reviews(ids)
+            recording_ids_in_db_after = self._get_recordings(ids)
+            assert len(review_ids_in_db_after) == 0
+            assert len(recording_ids_in_db_after) == 0

@@ -168,7 +168,7 @@ class TestHttp(unittest.TestCase):

        assert event
        assert event["id"] == id
-        assert event == model_to_dict(Event.get(Event.id == id))
+        assert event["id"] == model_to_dict(Event.get(Event.id == id))["id"]

    def test_get_bad_event(self):
        app = create_fastapi_app(

@@ -13,6 +13,7 @@ from frigate.config import (
    CameraConfig,
    ModelConfig,
)
+from frigate.review.types import SeverityEnum
from frigate.util.image import (
    area,
    calculate_region,
@@ -59,6 +60,27 @@ class TrackedObject:
        self.pending_loitering = False
        self.previous = self.to_dict()

+    @property
+    def max_severity(self) -> Optional[str]:
+        review_config = self.camera_config.review
+
+        if self.obj_data["label"] in review_config.alerts.labels and (
+            not review_config.alerts.required_zones
+            or set(self.entered_zones) & set(review_config.alerts.required_zones)
+        ):
+            return SeverityEnum.alert
+
+        if (
+            not review_config.detections.labels
+            or self.obj_data["label"] in review_config.detections.labels
+        ) and (
+            not review_config.detections.required_zones
+            or set(self.entered_zones) & set(review_config.detections.required_zones)
+        ):
+            return SeverityEnum.detection
+
+        return None
+
    def _is_false_positive(self):
        # once a true positive, always a true positive
        if not self.false_positive:
@@ -232,6 +254,7 @@ class TrackedObject:
            "attributes": self.attributes,
            "current_attributes": self.obj_data["attributes"],
            "pending_loitering": self.pending_loitering,
+            "max_severity": self.max_severity,
        }

        if include_thumbnail:

@@ -14,6 +14,16 @@ from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = "0.15-0"
+DEFAULT_CONFIG_FILE = "/config/config.yml"
+
+
+def find_config_file() -> str:
+    config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
+
+    if not os.path.isfile(config_path):
+        config_path = config_path.replace("yml", "yaml")
+
+    return config_path


def migrate_frigate_config(config_file: str):
|
||||
|
||||
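Usage sketch for find_config_file (the override path is hypothetical): with no CONFIG_FILE set it returns /config/config.yml, and it falls back to the .yaml spelling only when the configured .yml path does not exist. Note that str.replace swaps every "yml" substring, which is safe for the default path but worth keeping in mind for unusual custom paths.

import os

os.environ["CONFIG_FILE"] = "/config/frigate.yml"  # hypothetical override
path = find_config_file()
# "/config/frigate.yml" if that file exists, otherwise "/config/frigate.yaml"
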
@@ -219,19 +219,35 @@ def draw_box_with_label(
    text_width = size[0][0]
    text_height = size[0][1]
    line_height = text_height + size[1]
    # get frame height
    frame_height = frame.shape[0]
    # set the text start position
    if position == "ul":
        text_offset_x = x_min
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
        text_offset_y = max(0, y_min - (line_height + 8))
    elif position == "ur":
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
        text_offset_x = max(0, x_max - (text_width + 8))
        text_offset_y = max(0, y_min - (line_height + 8))
    elif position == "bl":
        text_offset_x = x_min
        text_offset_y = y_max
        text_offset_y = min(frame_height - line_height, y_max)
    elif position == "br":
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = y_max
        text_offset_x = max(0, x_max - (text_width + 8))
        text_offset_y = min(frame_height - line_height, y_max)
    # Adjust position if it overlaps with the box or goes out of frame
    if position in {"ul", "ur"}:
        if text_offset_y < y_min + thickness:  # Label overlaps with the box
            if y_min - (line_height + 8) < 0 and y_max + line_height <= frame_height:
                # Not enough space above, and there is space below
                text_offset_y = y_max
            elif y_min - (line_height + 8) >= 0:
                # Enough space above, keep the label at the top
                text_offset_y = max(0, y_min - (line_height + 8))
    elif position in {"bl", "br"}:
        if text_offset_y + line_height > frame_height:
            # If there's not enough space below, try above the box
            text_offset_y = max(0, y_min - (line_height + 8))

    # make the coords of the box with a small padding of two pixels
    textbox_coords = (
        (text_offset_x, text_offset_y),

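The clamping introduced above reduces to pinning the label's origin inside the frame; a minimal sketch of the same arithmetic (helper name is illustrative):

def clamp_label_origin(x, y, line_height, frame_height):
    # x is pinned at the left edge; y is kept so a full text line still fits
    return max(0, x), max(0, min(y, frame_height - line_height))
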
@@ -113,7 +113,7 @@ def capture_frames(
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()
        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{config.name}_{frame_index}"
        frame_name = f"{config.name}_frame{frame_index}"
        frame_buffer = frame_manager.write(frame_name)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)

@@ -29,8 +29,11 @@ export function ApiProvider({ children, options }: ApiProviderType) {
        error.response &&
        [401, 302, 307].includes(error.response.status)
      ) {
        window.location.href =
          error.response.headers.get("location") ?? "login";
        // redirect to the login page if not already there
        const loginPage = error.response.headers.get("location") ?? "login";
        if (window.location.href !== loginPage) {
          window.location.href = loginPage;
        }
      }
    },
    ...options,

@@ -63,7 +63,7 @@ export function UserAuthForm({ className, ...props }: UserAuthFormProps) {
        toast.error("Exceeded rate limit. Try again later.", {
          position: "top-center",
        });
      } else if (err.response?.status === 400) {
      } else if (err.response?.status === 401) {
        toast.error("Login failed", {
          position: "top-center",
        });

@@ -755,7 +755,11 @@ export function CameraGroupEdit({
              <FormMessage />
              {[
                ...(birdseyeConfig?.enabled ? ["birdseye"] : []),
                ...Object.keys(config?.cameras ?? {}),
                ...Object.keys(config?.cameras ?? {}).sort(
                  (a, b) =>
                    (config?.cameras[a]?.ui?.order ?? 0) -
                    (config?.cameras[b]?.ui?.order ?? 0),
                ),
              ].map((camera) => (
                <FormControl key={camera}>
                  <FilterSwitch

@@ -15,13 +15,15 @@ import {
  SearchFilter,
  SearchFilters,
  SearchSource,
  SearchSortType,
} from "@/types/search";
import { DateRange } from "react-day-picker";
import { cn } from "@/lib/utils";
import { MdLabel } from "react-icons/md";
import { MdLabel, MdSort } from "react-icons/md";
import PlatformAwareDialog from "../overlay/dialog/PlatformAwareDialog";
import SearchFilterDialog from "../overlay/dialog/SearchFilterDialog";
import { CalendarRangeFilterButton } from "./CalendarFilterButton";
import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group";

type SearchFilterGroupProps = {
  className: string;
@@ -107,6 +109,25 @@ export default function SearchFilterGroup({
    [config, allLabels, allZones],
  );

  const availableSortTypes = useMemo(() => {
    const sortTypes = ["date_asc", "date_desc"];
    if (filter?.min_score || filter?.max_score) {
      sortTypes.push("score_desc", "score_asc");
    }
    if (filter?.event_id || filter?.query) {
      sortTypes.push("relevance");
    }
    return sortTypes as SearchSortType[];
  }, [filter]);

  const defaultSortType = useMemo<SearchSortType>(() => {
    if (filter?.query || filter?.event_id) {
      return "relevance";
    } else {
      return "date_desc";
    }
  }, [filter]);

  const groups = useMemo(() => {
    if (!config) {
      return [];
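The sort-option rules in the availableSortTypes/defaultSortType hooks above, sketched in Python for quick reference (the dict-based filter is a stand-in for the SearchFilter object; names are illustrative):

def available_sort_types(search_filter):
    sorts = ["date_asc", "date_desc"]  # date sorts are always offered
    if search_filter.get("min_score") or search_filter.get("max_score"):
        sorts += ["score_desc", "score_asc"]  # score sorts need a score filter
    if search_filter.get("event_id") or search_filter.get("query"):
        sorts.append("relevance")  # relevance needs a query or similarity search
    return sorts

def default_sort_type(search_filter):
    if search_filter.get("query") or search_filter.get("event_id"):
        return "relevance"
    return "date_desc"
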
@@ -179,6 +200,16 @@ export default function SearchFilterGroup({
        filterValues={filterValues}
        onUpdateFilter={onUpdateFilter}
      />
      {filters.includes("sort") && Object.keys(filter ?? {}).length > 0 && (
        <SortTypeButton
          availableSortTypes={availableSortTypes ?? []}
          defaultSortType={defaultSortType}
          selectedSortType={filter?.sort}
          updateSortType={(newSort) => {
            onUpdateFilter({ ...filter, sort: newSort });
          }}
        />
      )}
    </div>
  );
}

@@ -362,3 +393,176 @@ export function GeneralFilterContent({
    </>
  );
}

type SortTypeButtonProps = {
  availableSortTypes: SearchSortType[];
  defaultSortType: SearchSortType;
  selectedSortType: SearchSortType | undefined;
  updateSortType: (sortType: SearchSortType | undefined) => void;
};
function SortTypeButton({
  availableSortTypes,
  defaultSortType,
  selectedSortType,
  updateSortType,
}: SortTypeButtonProps) {
  const [open, setOpen] = useState(false);
  const [currentSortType, setCurrentSortType] = useState<
    SearchSortType | undefined
  >(selectedSortType as SearchSortType);

  // ui

  useEffect(() => {
    setCurrentSortType(selectedSortType);
    // only refresh when state changes
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [selectedSortType]);

  const trigger = (
    <Button
      size="sm"
      variant={
        selectedSortType != defaultSortType && selectedSortType != undefined
          ? "select"
          : "default"
      }
      className="flex items-center gap-2 capitalize"
      aria-label="Labels"
    >
      <MdSort
        className={`${selectedSortType != defaultSortType && selectedSortType != undefined ? "text-selected-foreground" : "text-secondary-foreground"}`}
      />
      <div
        className={`${selectedSortType != defaultSortType && selectedSortType != undefined ? "text-selected-foreground" : "text-primary"}`}
      >
        Sort
      </div>
    </Button>
  );
  const content = (
    <SortTypeContent
      availableSortTypes={availableSortTypes ?? []}
      defaultSortType={defaultSortType}
      selectedSortType={selectedSortType}
      currentSortType={currentSortType}
      setCurrentSortType={setCurrentSortType}
      updateSortType={updateSortType}
      onClose={() => setOpen(false)}
    />
  );

  return (
    <PlatformAwareDialog
      trigger={trigger}
      content={content}
      contentClassName={
        isDesktop
          ? "scrollbar-container h-auto max-h-[80dvh] overflow-y-auto"
          : "max-h-[75dvh] overflow-hidden p-4"
      }
      open={open}
      onOpenChange={(open) => {
        if (!open) {
          setCurrentSortType(selectedSortType);
        }

        setOpen(open);
      }}
    />
  );
}

type SortTypeContentProps = {
  availableSortTypes: SearchSortType[];
  defaultSortType: SearchSortType;
  selectedSortType: SearchSortType | undefined;
  currentSortType: SearchSortType | undefined;
  updateSortType: (sort_type: SearchSortType | undefined) => void;
  setCurrentSortType: (sort_type: SearchSortType | undefined) => void;
  onClose: () => void;
};
export function SortTypeContent({
  availableSortTypes,
  defaultSortType,
  selectedSortType,
  currentSortType,
  updateSortType,
  setCurrentSortType,
  onClose,
}: SortTypeContentProps) {
  const sortLabels = {
    date_asc: "Date (Ascending)",
    date_desc: "Date (Descending)",
    score_asc: "Object Score (Ascending)",
    score_desc: "Object Score (Descending)",
    relevance: "Relevance",
  };

  return (
    <>
      <div className="overflow-x-hidden">
        <div className="my-2.5 flex flex-col gap-2.5">
          <RadioGroup
            value={
              Array.isArray(currentSortType)
                ? currentSortType?.[0]
                : (currentSortType ?? defaultSortType)
            }
            defaultValue={defaultSortType}
            onValueChange={(value) =>
              setCurrentSortType(value as SearchSortType)
            }
            className="w-full space-y-1"
          >
            {availableSortTypes.map((value) => (
              <div className="flex flex-row gap-2">
                <RadioGroupItem
                  key={value}
                  value={value}
                  id={`sort-${value}`}
                  className={
                    value == (currentSortType ?? defaultSortType)
                      ? "bg-selected from-selected/50 to-selected/90 text-selected"
                      : "bg-secondary from-secondary/50 to-secondary/90 text-secondary"
                  }
                />
                <Label
                  htmlFor={`sort-${value}`}
                  className="flex cursor-pointer items-center space-x-2"
                >
                  <span>{sortLabels[value]}</span>
                </Label>
              </div>
            ))}
          </RadioGroup>
        </div>
      </div>
      <DropdownMenuSeparator />
      <div className="flex items-center justify-evenly p-2">
        <Button
          aria-label="Apply"
          variant="select"
          onClick={() => {
            if (selectedSortType != currentSortType) {
              updateSortType(currentSortType);
            }

            onClose();
          }}
        >
          Apply
        </Button>
        <Button
          aria-label="Reset"
          onClick={() => {
            setCurrentSortType(undefined);
            updateSortType(undefined);
          }}
        >
          Reset
        </Button>
      </div>
    </>
  );
}

@@ -18,6 +18,7 @@ import {
  FilterType,
  SavedSearchQuery,
  SearchFilter,
  SearchSortType,
  SearchSource,
} from "@/types/search";
import useSuggestions from "@/hooks/use-suggestions";
@@ -323,6 +324,9 @@ export default function InputWithTags({
      case "event_id":
        newFilters.event_id = value;
        break;
      case "sort":
        newFilters.sort = value as SearchSortType;
        break;
      default:
        // Handle array types (cameras, labels, subLabels, zones)
        if (!newFilters[type]) newFilters[type] = [];

@@ -477,7 +477,10 @@ export default function ObjectLifecycle({
                  </p>
                  {Array.isArray(item.data.box) &&
                  item.data.box.length >= 4
                    ? (item.data.box[2] / item.data.box[3]).toFixed(2)
                    ? (
                        aspectRatio *
                        (item.data.box[2] / item.data.box[3])
                      ).toFixed(2)
                    : "N/A"}
                </div>
              </div>

@@ -74,6 +74,23 @@ export default function ReviewDetailDialog({
    return events.length != review?.data.detections.length;
  }, [review, events]);

  const missingObjects = useMemo(() => {
    if (!review || !events) {
      return [];
    }

    const detectedIds = review.data.detections;
    const missing = Array.from(
      new Set(
        events
          .filter((event) => !detectedIds.includes(event.id))
          .map((event) => event.label),
      ),
    );

    return missing;
  }, [review, events]);

  const formattedDate = useFormattedTimestamp(
    review?.start_time ?? 0,
    config?.ui.time_format == "24hour"
@@ -263,8 +280,25 @@ export default function ReviewDetailDialog({
          </div>
          {hasMismatch && (
            <div className="p-4 text-center text-sm">
              Some objects that were detected are not included in this list
              because the object does not have a snapshot
              {(() => {
                const detectedCount = Math.abs(
                  (events?.length ?? 0) -
                    (review?.data.detections.length ?? 0),
                );
                const objectLabel =
                  detectedCount === 1 ? "object was" : "objects were";

                return `${detectedCount} unavailable ${objectLabel} detected and included in this review item.`;
              })()}{" "}
              Those objects either did not qualify as an alert or detection
              or have already been cleaned up/deleted.
              {missingObjects.length > 0 && (
                <div className="mt-2">
                  Adjust your configuration if you want Frigate to save
                  tracked objects for the following labels:{" "}
                  {missingObjects.join(", ")}
                </div>
              )}
            </div>
          )}
          <div className="relative flex size-full flex-col gap-2">

@@ -469,16 +469,43 @@ function ObjectDetailsTab({
          </div>
        </div>
        <div className="flex flex-col gap-1.5">
          <div className="text-sm text-primary/40">Description</div>
          <Textarea
            className="h-64"
            placeholder="Description of the tracked object"
            value={desc}
            onChange={(e) => setDesc(e.target.value)}
          />
          {config?.cameras[search.camera].genai.enabled &&
          !search.end_time &&
          (config.cameras[search.camera].genai.required_zones.length === 0 ||
            search.zones.some((zone) =>
              config.cameras[search.camera].genai.required_zones.includes(zone),
            )) &&
          (config.cameras[search.camera].genai.objects.length === 0 ||
            config.cameras[search.camera].genai.objects.includes(
              search.label,
            )) ? (
            <>
              <div className="text-sm text-primary/40">Description</div>
              <div className="flex h-64 flex-col items-center justify-center gap-3 border p-4 text-sm text-primary/40">
                <div className="flex">
                  <ActivityIndicator />
                </div>
                <div className="flex">
                  Frigate will not request a description from your Generative AI
                  provider until the tracked object's lifecycle has ended.
                </div>
              </div>
            </>
          ) : (
            <>
              <div className="text-sm text-primary/40">Description</div>
              <Textarea
                className="h-64"
                placeholder="Description of the tracked object"
                value={desc}
                onChange={(e) => setDesc(e.target.value)}
              />
            </>
          )}

          <div className="flex w-full flex-row justify-end gap-2">
            {config?.cameras[search.camera].genai.enabled && (
              <div className="flex items-center">
            {config?.cameras[search.camera].genai.enabled && search.end_time && (
              <div className="flex items-start">
                <Button
                  className="rounded-r-none border-r-0"
                  aria-label="Regenerate tracked object description"
@@ -516,13 +543,16 @@ function ObjectDetailsTab({
                )}
              </div>
            )}
            <Button
              variant="select"
              aria-label="Save"
              onClick={updateDescription}
            >
              Save
            </Button>
            {(config?.cameras[search.camera].genai.enabled && search.end_time) ||
              (!config?.cameras[search.camera].genai.enabled && (
                <Button
                  variant="select"
                  aria-label="Save"
                  onClick={updateDescription}
                >
                  Save
                </Button>
              ))}
          </div>
        </div>
      </div>

@@ -175,7 +175,7 @@ export default function SearchFilterDialog({
      time_range: undefined,
      zones: undefined,
      sub_labels: undefined,
      search_type: ["thumbnail", "description"],
      search_type: undefined,
      min_score: undefined,
      max_score: undefined,
      has_snapshot: undefined,

@@ -46,7 +46,7 @@ export default function SearchSettings({
  const trigger = (
    <Button
      className="flex items-center gap-2"
      aria-label="Search Settings"
      aria-label="Explore Settings"
      size="sm"
    >
      <FaCog className="text-secondary-foreground" />

@@ -5,6 +5,7 @@ import { usePersistence } from "./use-persistence";
export function useOverlayState<S>(
  key: string,
  defaultValue: S | undefined = undefined,
  preserveSearch: boolean = true,
): [S | undefined, (value: S, replace?: boolean) => void] {
  const location = useLocation();
  const navigate = useNavigate();
@@ -15,7 +16,10 @@ export function useOverlayState<S>(
    (value: S, replace: boolean = false) => {
      const newLocationState = { ...currentLocationState };
      newLocationState[key] = value;
      navigate(location.pathname, { state: newLocationState, replace });
      navigate(location.pathname + (preserveSearch ? location.search : ""), {
        state: newLocationState,
        replace,
      });
    },
    // we know that these deps are correct
    // eslint-disable-next-line react-hooks/exhaustive-deps

@@ -39,8 +39,11 @@ export default function Events() {

  const [showReviewed, setShowReviewed] = usePersistence("showReviewed", false);

  const [recording, setRecording] =
    useOverlayState<RecordingStartingPoint>("recording");
  const [recording, setRecording] = useOverlayState<RecordingStartingPoint>(
    "recording",
    undefined,
    false,
  );

  useSearchEffect("id", (reviewId: string) => {
    axios

@@ -116,6 +116,7 @@ export default function Explore() {
    is_submitted: searchSearchParams["is_submitted"],
    has_clip: searchSearchParams["has_clip"],
    event_id: searchSearchParams["event_id"],
    sort: searchSearchParams["sort"],
    limit:
      Object.keys(searchSearchParams).length == 0 ? API_LIMIT : undefined,
    timezone,
@@ -148,6 +149,7 @@ export default function Explore() {
    is_submitted: searchSearchParams["is_submitted"],
    has_clip: searchSearchParams["has_clip"],
    event_id: searchSearchParams["event_id"],
    sort: searchSearchParams["sort"],
    timezone,
    include_thumbnails: 0,
  },
@@ -165,12 +167,17 @@ export default function Explore() {

    const [url, params] = searchQuery;

    // If it's not the first page, use the last item's start_time as the 'before' parameter
    const isAscending = params.sort?.includes("date_asc");

    if (pageIndex > 0 && previousPageData) {
      const lastDate = previousPageData[previousPageData.length - 1].start_time;
      return [
        url,
        { ...params, before: lastDate.toString(), limit: API_LIMIT },
        {
          ...params,
          [isAscending ? "after" : "before"]: lastDate.toString(),
          limit: API_LIMIT,
        },
      ];
    }

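The keyset-pagination rule above, sketched in Python (names are illustrative): ascending date sorts page forward with `after`, everything else pages backward with `before`, keyed on the previous page's last start_time.

def next_page_params(params, last_start_time, limit):
    is_ascending = "date_asc" in (params.get("sort") or "")
    key = "after" if is_ascending else "before"
    return {**params, key: str(last_start_time), "limit": limit}
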
@@ -321,12 +328,12 @@ export default function Explore() {
      <div className="flex max-w-96 flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
        <div className="my-5 flex flex-col items-center gap-2 text-xl">
          <TbExclamationCircle className="mb-3 size-10" />
          <div>Search Unavailable</div>
          <div>Explore is Unavailable</div>
        </div>
        {embeddingsReindexing && allModelsLoaded && (
          <>
            <div className="text-center text-primary-variant">
              Search can be used after tracked object embeddings have
              Explore can be used after tracked object embeddings have
              finished reindexing.
            </div>
            <div className="pt-5 text-center">
@@ -377,8 +384,8 @@ export default function Explore() {
          <>
            <div className="text-center text-primary-variant">
              Frigate is downloading the necessary embeddings models to
              support semantic searching. This may take several minutes
              depending on the speed of your network connection.
              support the Semantic Search feature. This may take several
              minutes depending on the speed of your network connection.
            </div>
            <div className="flex w-96 flex-col gap-2 py-5">
              <div className="flex flex-row items-center justify-center gap-2">

@@ -40,7 +40,7 @@ import UiSettingsView from "@/views/settings/UiSettingsView";

const allSettingsViews = [
  "UI settings",
  "search settings",
  "explore settings",
  "camera settings",
  "masks / zones",
  "motion tuner",
@@ -175,7 +175,7 @@ export default function Settings() {
        </div>
        <div className="mt-2 flex h-full w-full flex-col items-start md:h-dvh md:pb-24">
          {page == "UI settings" && <UiSettingsView />}
          {page == "search settings" && (
          {page == "explore settings" && (
            <SearchSettingsView setUnsavedChanges={setUnsavedChanges} />
          )}
          {page == "debug" && (

@@ -142,6 +142,7 @@ export interface CameraConfig {
    password: string | null;
    port: number;
    user: string | null;
    tls_insecure: boolean;
  };
  record: {
    enabled: boolean;

@@ -6,6 +6,7 @@ const SEARCH_FILTERS = [
  "zone",
  "sub",
  "source",
  "sort",
] as const;
export type SearchFilters = (typeof SEARCH_FILTERS)[number];
export const DEFAULT_SEARCH_FILTERS: SearchFilters[] = [
@@ -16,10 +17,18 @@ export const DEFAULT_SEARCH_FILTERS: SearchFilters[] = [
  "zone",
  "sub",
  "source",
  "sort",
];

export type SearchSource = "similarity" | "thumbnail" | "description";

export type SearchSortType =
  | "date_asc"
  | "date_desc"
  | "score_asc"
  | "score_desc"
  | "relevance";

export type SearchResult = {
  id: string;
  camera: string;
@@ -65,6 +74,7 @@ export type SearchFilter = {
  time_range?: string;
  search_type?: SearchSource[];
  event_id?: string;
  sort?: SearchSortType;
};

export const DEFAULT_TIME_RANGE_AFTER = "00:00";
@@ -86,6 +96,7 @@ export type SearchQueryParams = {
  query?: string;
  page?: number;
  time_range?: string;
  sort?: SearchSortType;
};

export type SearchQuery = [string, SearchQueryParams] | null;

@@ -17,7 +17,12 @@ import {
  DropdownMenuItem,
  DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { TooltipProvider } from "@/components/ui/tooltip";
import {
  Tooltip,
  TooltipContent,
  TooltipProvider,
  TooltipTrigger,
} from "@/components/ui/tooltip";
import { useResizeObserver } from "@/hooks/resize-observer";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
@@ -29,6 +34,7 @@ import {
import { CameraPtzInfo } from "@/types/ptz";
import { RecordingStartingPoint } from "@/types/record";
import React, {
  ReactNode,
  useCallback,
  useEffect,
  useMemo,
@@ -518,6 +524,53 @@ export default function LiveCameraView({
  );
}

type TooltipButtonProps = {
  label: string;
  onClick?: () => void;
  onMouseDown?: (e: React.MouseEvent) => void;
  onMouseUp?: (e: React.MouseEvent) => void;
  onTouchStart?: (e: React.TouchEvent) => void;
  onTouchEnd?: (e: React.TouchEvent) => void;
  children: ReactNode;
  className?: string;
};

function TooltipButton({
  label,
  onClick,
  onMouseDown,
  onMouseUp,
  onTouchStart,
  onTouchEnd,
  children,
  className,
  ...props
}: TooltipButtonProps) {
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>
          <Button
            aria-label={label}
            onClick={onClick}
            onMouseDown={onMouseDown}
            onMouseUp={onMouseUp}
            onTouchStart={onTouchStart}
            onTouchEnd={onTouchEnd}
            className={className}
            {...props}
          >
            {children}
          </Button>
        </TooltipTrigger>
        <TooltipContent>
          <p>{label}</p>
        </TooltipContent>
      </Tooltip>
    </TooltipProvider>
  );
}

function PtzControlPanel({
  camera,
  clickOverlay,
@@ -611,8 +664,8 @@ function PtzControlPanel({
    >
      {ptz?.features?.includes("pt") && (
        <>
          <Button
            aria-label="Move PTZ camera to the left"
          <TooltipButton
            label="Move camera left"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("MOVE_LEFT");
@@ -625,9 +678,9 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <FaAngleLeft />
          </Button>
          <Button
            aria-label="Move PTZ camera up"
          </TooltipButton>
          <TooltipButton
            label="Move camera up"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("MOVE_UP");
@@ -640,9 +693,9 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <FaAngleUp />
          </Button>
          <Button
            aria-label="Move PTZ camera down"
          </TooltipButton>
          <TooltipButton
            label="Move camera down"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("MOVE_DOWN");
@@ -655,9 +708,9 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <FaAngleDown />
          </Button>
          <Button
            aria-label="Move PTZ camera to the right"
          </TooltipButton>
          <TooltipButton
            label="Move camera right"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("MOVE_RIGHT");
@@ -670,13 +723,13 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <FaAngleRight />
          </Button>
          </TooltipButton>
        </>
      )}
      {ptz?.features?.includes("zoom") && (
        <>
          <Button
            aria-label="Zoom PTZ camera in"
          <TooltipButton
            label="Zoom in"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("ZOOM_IN");
@@ -689,9 +742,9 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <MdZoomIn />
          </Button>
          <Button
            aria-label="Zoom PTZ camera out"
          </TooltipButton>
          <TooltipButton
            label="Zoom out"
            onMouseDown={(e) => {
              e.preventDefault();
              sendPtz("ZOOM_OUT");
@@ -704,45 +757,60 @@ function PtzControlPanel({
            onTouchEnd={onStop}
          >
            <MdZoomOut />
          </Button>
          </TooltipButton>
        </>
      )}

      {ptz?.features?.includes("pt-r-fov") && (
        <>
          <Button
            className={`${clickOverlay ? "text-selected" : "text-primary"}`}
            aria-label="Click in the frame to center the PTZ camera"
            onClick={() => setClickOverlay(!clickOverlay)}
          >
            <TbViewfinder />
          </Button>
        </>
        <TooltipProvider>
          <Tooltip>
            <TooltipTrigger asChild>
              <Button
                className={`${clickOverlay ? "text-selected" : "text-primary"}`}
                aria-label="Click in the frame to center the camera"
                onClick={() => setClickOverlay(!clickOverlay)}
              >
                <TbViewfinder />
              </Button>
            </TooltipTrigger>
            <TooltipContent>
              <p>{clickOverlay ? "Disable" : "Enable"} click to move</p>
            </TooltipContent>
          </Tooltip>
        </TooltipProvider>
      )}
      {(ptz?.presets?.length ?? 0) > 0 && (
        <DropdownMenu modal={!isDesktop}>
          <DropdownMenuTrigger asChild>
            <Button aria-label="PTZ camera presets">
              <BsThreeDotsVertical />
            </Button>
          </DropdownMenuTrigger>
          <DropdownMenuContent
            className="scrollbar-container max-h-[40dvh] overflow-y-auto"
            onCloseAutoFocus={(e) => e.preventDefault()}
          >
            {ptz?.presets.map((preset) => {
              return (
                <DropdownMenuItem
                  key={preset}
                  aria-label={preset}
                  className="cursor-pointer"
                  onSelect={() => sendPtz(`preset_${preset}`)}
        <TooltipProvider>
          <Tooltip>
            <TooltipTrigger asChild>
              <DropdownMenu modal={!isDesktop}>
                <DropdownMenuTrigger asChild>
                  <Button aria-label="PTZ camera presets">
                    <BsThreeDotsVertical />
                  </Button>
                </DropdownMenuTrigger>
                <DropdownMenuContent
                  className="scrollbar-container max-h-[40dvh] overflow-y-auto"
                  onCloseAutoFocus={(e) => e.preventDefault()}
                >
                  {preset}
                </DropdownMenuItem>
              );
            })}
          </DropdownMenuContent>
        </DropdownMenu>
                  {ptz?.presets.map((preset) => (
                    <DropdownMenuItem
                      key={preset}
                      aria-label={preset}
                      className="cursor-pointer"
                      onSelect={() => sendPtz(`preset_${preset}`)}
                    >
                      {preset}
                    </DropdownMenuItem>
                  ))}
                </DropdownMenuContent>
              </DropdownMenu>
            </TooltipTrigger>
            <TooltipContent>
              <p>PTZ camera presets</p>
            </TooltipContent>
          </Tooltip>
        </TooltipProvider>
      )}
    </div>
  );

@@ -91,7 +91,7 @@ export default function SearchSettingsView({
      )
      .then((res) => {
        if (res.status === 200) {
          toast.success("Search settings have been saved.", {
          toast.success("Explore settings have been saved.", {
            position: "top-center",
          });
          setChangedValue(false);
@@ -128,7 +128,7 @@ export default function SearchSettingsView({
    if (changedValue) {
      addMessage(
        "search_settings",
        `Unsaved search settings changes`,
        `Unsaved Explore settings changes`,
        undefined,
        "search_settings",
      );
@@ -140,7 +140,7 @@ export default function SearchSettingsView({
  }, [changedValue]);

  useEffect(() => {
    document.title = "Search Settings - Frigate";
    document.title = "Explore Settings - Frigate";
  }, []);

  if (!config) {
@@ -152,7 +152,7 @@ export default function SearchSettingsView({
      <Toaster position="top-center" closeButton={true} />
      <div className="scrollbar-container order-last mb-10 mt-2 flex h-full w-full flex-col overflow-y-auto rounded-lg border-[1px] border-secondary-foreground bg-background_alt p-2 md:order-none md:mb-0 md:mr-2 md:mt-0">
        <Heading as="h3" className="my-2">
          Search Settings
          Explore Settings
        </Heading>
        <Separator className="my-2 flex bg-secondary" />
        <Heading as="h4" className="my-2">
@@ -221,7 +221,7 @@ export default function SearchSettingsView({
          <div className="text-md">Model Size</div>
          <div className="space-y-1 text-sm text-muted-foreground">
            <p>
              The size of the model used for semantic search embeddings.
              The size of the model used for Semantic Search embeddings.
            </p>
            <ul className="list-disc pl-5 text-sm">
              <li>