Compare commits


1 Commit

Author: dependabot[bot]
SHA1: c71bb53e8b
Message: Update uvicorn requirement from ==0.30.* to ==0.34.* in /docker/main
Updates the requirements on [uvicorn](https://github.com/encode/uvicorn) to permit the latest version.
- [Release notes](https://github.com/encode/uvicorn/releases)
- [Changelog](https://github.com/encode/uvicorn/blob/master/CHANGELOG.md)
- [Commits](https://github.com/encode/uvicorn/compare/0.30.0...0.34.0)

---
updated-dependencies:
- dependency-name: uvicorn
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2024-12-16 12:06:49 +00:00
54 changed files with 2341 additions and 5486 deletions

View File

@@ -75,6 +75,15 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push Rockchip build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
name: Jetson Jetpack 4

View File

@@ -76,7 +76,7 @@ jobs:
with:
persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.3.0
uses: actions/setup-python@v5.1.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements

View File

@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
object_detector.cleanup()
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######

View File

@@ -1,12 +1,12 @@
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*
appdirs==1.4.4
argcomplete==2.0.0
contextlib2==0.6.0.post1
distlib==0.3.6
filelock==3.8.0
future==0.18.2
importlib-metadata==5.1.0
importlib-resources==5.1.2
netaddr==0.8.0
netifaces==0.10.9
verboselogs==1.7
virtualenv==20.17.0

View File

@@ -4,7 +4,7 @@ aiohttp == 3.11.2
starlette == 0.41.2
starlette-context == 0.3.6
fastapi == 0.115.*
uvicorn == 0.30.*
uvicorn == 0.34.*
slowapi == 0.1.*
imutils == 0.5.*
joserfc == 1.0.*

View File

@@ -12,11 +12,26 @@ ARG TARGETARCH
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
# Build CuDNN
FROM wget AS cudnn-deps
ARG COMPUTE_LEVEL
RUN apt-get update \
&& apt-get install -y git build-essential
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
&& dpkg -i cuda-keyring_1.1-1_all.deb \
&& apt-get update \
&& apt-get -y install cuda-toolkit \
&& rm -rf /var/lib/apt/lists/*
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
@@ -27,7 +42,7 @@ FROM devcontainer AS devcontainer-trt
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \

View File

@@ -24,7 +24,6 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS=""

View File

@@ -41,7 +41,6 @@ cameras:
...
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -50,8 +49,6 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:
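
Editor's note: taken together, the ONVIF options touched in this hunk amount to a camera block like the sketch below (hostname, credentials, and camera name are placeholders; whether `tls_insecure` is available depends on which side of this diff your Frigate version is on):

```yaml
cameras:
  back_yard:
    onvif:
      # a plain host implies HTTP; prefix with https:// to use TLS per the note above
      host: https://192.168.1.10
      port: 8000
      user: admin
      password: admin
      # skip certificate verification, e.g. for self-signed camera certificates
      tls_insecure: true
```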

View File

@@ -156,9 +156,7 @@ cameras:
#### Reolink Doorbell
The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
```yaml
go2rtc:

View File

@@ -5,8 +5,6 @@ title: Generative AI
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
:::info
Semantic Search must be enabled to use Generative AI.
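
Editor's note: for orientation, a minimal global Generative AI block of the kind this page documents might look like the sketch below (the provider, model name, and key handling are illustrative placeholders, not values from this diff; Semantic Search must be enabled separately as the note above says):

```yaml
genai:
  enabled: true
  provider: gemini                    # placeholder; use whichever supported provider you have configured
  api_key: "{FRIGATE_GENAI_API_KEY}"  # hypothetical secret injected via environment substitution
  model: gemini-1.5-flash             # placeholder model name
```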

View File

@@ -203,13 +203,14 @@ detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:

View File

@@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi
### Audio Support
MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
@@ -138,13 +138,3 @@ services:
:::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
### Two way talk
For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
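
Editor's note: to make the audio requirements above concrete, a minimal restream sketch (camera name and RTSP URL are placeholders) that publishes both AAC for MSE and opus for WebRTC through go2rtc's ffmpeg source might look like:

```yaml
go2rtc:
  streams:
    my_camera:
      - rtsp://user:password@192.168.1.20:554/stream  # original camera stream
      - "ffmpeg:my_camera#audio=aac#audio=opus"       # add AAC (MSE) and opus (WebRTC) audio tracks
```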

View File

@@ -144,9 +144,7 @@ detectors:
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
```yaml
detectors:
@@ -256,7 +254,6 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@@ -285,8 +282,6 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
@@ -506,12 +501,11 @@ detectors:
cpu1:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@@ -638,6 +632,8 @@ detectors:
hailo8l:
type: hailo8l
device: PCIe
model:
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
model:
width: 300
@@ -645,5 +641,4 @@ model:
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```

View File

@@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@@ -117,27 +117,25 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Required: path to the model (default: automatic based on detector)
# Optional: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Required: path to the labelmap (default: shown below)
# Optional: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Required: Object detection model input colorspace
# Optional: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Required: Object detection model input tensor format
# Optional: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Required: Object detection model type, currently only used with the OpenVINO detector
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
# Required: Label name modifications. These are merged into the standard labelmap.
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)
@@ -688,7 +686,6 @@ cameras:
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@@ -697,8 +694,6 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
ignore_time_mismatch: False
@@ -762,8 +757,6 @@ cameras:
- cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: []
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False
# Optional
ui:

View File

@@ -305,15 +305,8 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
:::warning
If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation. Frigate does not officially support running inside of an LXC.
:::
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`

View File

@@ -3,15 +3,7 @@ id: recordings
title: Troubleshooting Recordings
---
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
You'll want to:
- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
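
Editor's note: as a concrete illustration of the first bullet above, a motion mask covering a camera's on-screen timestamp might look like this sketch (camera name and the relative coordinates are placeholders):

```yaml
cameras:
  front_door:
    motion:
      mask:
        - 0,0,0.4,0,0.4,0.05,0,0.05  # polygon covering a timestamp overlay in the top-left corner
```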
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
@@ -48,7 +40,6 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to be the same and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
@@ -63,7 +54,6 @@ services:
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```
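
Editor's note: since the compose snippet is truncated in this hunk, a minimal sketch of the same idea in compose form might look like the following (service name and limits are placeholders; key support can vary between compose versions):

```yaml
services:
  frigate:
    mem_limit: 4g        # equivalent of --memory
    memswap_limit: 4g    # equal to mem_limit so no additional swap can be used
    mem_swappiness: 0    # equivalent of --memory-swappiness=0
```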

docs/package-lock.json (generated, 7061 lines changed)

File diff suppressed because it is too large.

View File

@@ -17,15 +17,15 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.6.3",
"@docusaurus/preset-classic": "^3.6.3",
"@docusaurus/theme-mermaid": "^3.6.3",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@mdx-js/react": "^3.1.0",
"@docusaurus/core": "^3.5.2",
"@docusaurus/preset-classic": "^3.5.2",
"@docusaurus/theme-mermaid": "^3.5.2",
"@docusaurus/plugin-content-docs": "^3.5.2",
"@mdx-js/react": "^3.0.1",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.3.1",
"docusaurus-theme-openapi-docs": "^4.3.1",
"prism-react-renderer": "^2.4.1",
"docusaurus-plugin-openapi-docs": "^4.1.0",
"docusaurus-theme-openapi-docs": "^4.1.0",
"prism-react-renderer": "^2.4.0",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
"react-dom": "^18.3.1"

View File

@@ -139,8 +139,6 @@ def config(request: Request):
mode="json", warnings="none", exclude_none=True
)
for stream_name, stream in go2rtc.get("streams", {}).items():
if stream is None:
continue
if isinstance(stream, str):
cleaned = clean_camera_user_pass(stream)
else:

View File

@@ -437,7 +437,7 @@ class FrigateApp:
# pre-create shms
for i in range(shm_frame_count):
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
self.frame_manager.create(f"{config.name}_{i}", frame_size)
capture_process = util.Process(
target=capture_camera,

View File

@@ -151,7 +151,7 @@ class WebPushClient(Communicator): # type: ignore[misc]
camera: str = payload["after"]["camera"]
title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
message = f"Detected on {camera.replace('_', ' ').title()}"
image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"
image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
# if event is ongoing open to live view otherwise open to recordings view
direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"

View File

@@ -38,10 +38,6 @@ class GenAICameraConfig(BaseModel):
default_factory=list,
title="List of required zones to be entered in order to run generative AI.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
)
@field_validator("required_zones", mode="before")
@classmethod

View File

@@ -74,7 +74,6 @@ class OnvifConfig(FrigateBaseModel):
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",

View File

@@ -4,7 +4,6 @@ from typing import Optional
from pydantic import Field
from frigate.const import MAX_PRE_CAPTURE
from frigate.review.types import SeverityEnum
from ..base import FrigateBaseModel
@@ -102,15 +101,3 @@ class RecordConfig(FrigateBaseModel):
self.alerts.pre_capture,
self.detections.pre_capture,
)
def get_review_pre_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.pre_capture
else:
return self.detections.pre_capture
def get_review_post_capture(self, severity: SeverityEnum) -> int:
if severity == SeverityEnum.alert:
return self.alerts.post_capture
else:
return self.detections.post_capture

View File

@@ -85,7 +85,7 @@ class ZoneConfig(BaseModel):
if explicit:
self.coordinates = ",".join(
[
f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
f'{round(int(p.split(",")[0]) / frame_shape[1], 3)},{round(int(p.split(",")[1]) / frame_shape[0], 3)}'
for p in coordinates
]
)

View File

@@ -594,27 +594,35 @@ class FrigateConfig(FrigateBaseModel):
if isinstance(detector, dict)
else detector.model_dump(warnings="none")
)
detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
detector_config: DetectorConfig = adapter.validate_python(model_dict)
if detector_config.model is None:
detector_config.model = self.model.model_copy()
else:
path = detector_config.model.path
detector_config.model = self.model.model_copy()
detector_config.model.path = path
# users should not set model themselves
if detector_config.model:
detector_config.model = None
if "path" not in model_dict or len(model_dict.keys()) > 1:
logger.warning(
"Customizing more than a detector model path is unsupported."
)
model_config = self.model.model_dump(exclude_unset=True, warnings="none")
merged_model = deep_merge(
detector_config.model.model_dump(exclude_unset=True, warnings="none"),
self.model.model_dump(exclude_unset=True, warnings="none"),
)
if detector_config.model_path:
model_config["path"] = detector_config.model_path
if "path" not in model_config:
if "path" not in merged_model:
if detector_config.type == "cpu":
model_config["path"] = "/cpu_model.tflite"
merged_model["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
model_config["path"] = "/edgetpu_model.tflite"
merged_model["path"] = "/edgetpu_model.tflite"
model = ModelConfig.model_validate(model_config)
model.check_and_load_plus_model(self.plus_api, detector_config.type)
model.compute_model_hash()
detector_config.model = model
detector_config.model = ModelConfig.model_validate(merged_model)
detector_config.model.check_and_load_plus_model(
self.plus_api, detector_config.type
)
detector_config.model.compute_model_hash()
self.detectors[key] = detector_config
return self

View File

@@ -194,9 +194,6 @@ class BaseDetectorConfig(BaseModel):
model: Optional[ModelConfig] = Field(
default=None, title="Detector specific model configuration."
)
model_path: Optional[str] = Field(
default=None, title="Detector specific model path."
)
model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
)

View File

@@ -219,19 +219,19 @@ class TensorRtDetector(DetectionApi):
]
def __init__(self, detector_config: TensorRTDetectorConfig):
assert TRT_SUPPORT, (
f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
)
assert (
TRT_SUPPORT
), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
(cuda_err,) = cuda.cuInit(0)
assert cuda_err == cuda.CUresult.CUDA_SUCCESS, (
f"Failed to initialize cuda {cuda_err}"
)
assert (
cuda_err == cuda.CUresult.CUDA_SUCCESS
), f"Failed to initialize cuda {cuda_err}"
err, dev_count = cuda.cuDeviceGetCount()
logger.debug(f"Num Available Devices: {dev_count}")
assert detector_config.device < dev_count, (
f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
)
assert (
detector_config.device < dev_count
), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
err, self.cu_ctx = cuda.cuCtxCreate(
cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
)

View File

@@ -5,7 +5,6 @@ import logging
import os
import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from typing import Optional
import cv2
@@ -218,47 +217,16 @@ class EmbeddingMaintainer(threading.Thread):
_, buffer = cv2.imencode(".jpg", cropped_image)
snapshot_image = buffer.tobytes()
num_thumbnails = len(self.tracked_events.get(event_id, []))
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot
else (
[
data["thumbnail"]
for data in self.tracked_events[event_id]
]
if num_thumbnails > 0
[thumbnail for data in self.tracked_events[event_id]]
if len(self.tracked_events.get(event_id, [])) > 0
else [thumbnail]
)
)
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(
f"Saving {num_thumbnails} thumbnails for event {event.id}"
)
Path(
os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
).mkdir(parents=True, exist_ok=True)
for idx, data in enumerate(self.tracked_events[event_id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(
f"Unable to save thumbnail {idx} for {event.id}."
)
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._embed_description,
@@ -389,7 +357,7 @@ class EmbeddingMaintainer(threading.Thread):
[snapshot_image]
if event.has_snapshot and source == "snapshot"
else (
[data["thumbnail"] for data in self.tracked_events[event_id]]
[thumbnail for data in self.tracked_events[event_id]]
if len(self.tracked_events.get(event_id, [])) > 0
else [thumbnail]
)

View File

@@ -121,8 +121,8 @@ class EventCleanup(threading.Thread):
events_to_update = []
for event in query.iterator():
events_to_update.append(event.id)
for batch in query.iterator():
events_to_update.extend([event.id for event in batch])
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(
f"Updating {update_params} for {len(events_to_update)} events"
@@ -257,7 +257,7 @@ class EventCleanup(threading.Thread):
events_to_update = []
for event in query.iterator():
events_to_update.append(event.id)
events_to_update.append(event)
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(

View File

@@ -71,8 +71,8 @@ PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",

View File

@@ -68,13 +68,11 @@ class PlusApi:
or self._token_data["expires"] - datetime.datetime.now().timestamp() < 60
):
if self.key is None:
raise Exception(
"Plus API key not set. See https://docs.frigate.video/integrations/plus#set-your-api-key"
)
raise Exception("Plus API not activated")
parts = self.key.split(":")
r = requests.get(f"{self.host}/v1/auth/token", auth=(parts[0], parts[1]))
if not r.ok:
raise Exception(f"Unable to refresh API token: {r.text}")
raise Exception("Unable to refresh API token")
self._token_data = r.json()
def _get_authorization_header(self) -> dict:
@@ -118,6 +116,15 @@ class PlusApi:
logger.error(f"Failed to upload original: {r.status_code} {r.text}")
raise Exception(r.text)
# resize and submit annotate
files = {"file": get_jpg_bytes(image, 640, 70)}
data = presigned_urls["annotate"]["fields"]
data["content-type"] = "image/jpeg"
r = requests.post(presigned_urls["annotate"]["url"], files=files, data=data)
if not r.ok:
logger.error(f"Failed to upload annotate: {r.status_code} {r.text}")
raise Exception(r.text)
# resize and submit thumbnail
files = {"file": get_jpg_bytes(image, 200, 70)}
data = presigned_urls["thumbnail"]["fields"]

View File

@@ -135,7 +135,7 @@ class PtzMotionEstimator:
try:
logger.debug(
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0, 0]])}"
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0,0]])}"
)
except Exception:
pass
@@ -471,7 +471,7 @@ class PtzAutoTracker:
self.onvif.get_camera_status(camera)
logger.info(
f"Calibration for {camera} in progress: {round((step / num_steps) * 100)}% complete"
f"Calibration for {camera} in progress: {round((step/num_steps)*100)}% complete"
)
self.calibrating[camera] = False
@@ -690,7 +690,7 @@ class PtzAutoTracker:
f"{camera}: Predicted movement time: {self._predict_movement_time(camera, pan, tilt)}"
)
logger.debug(
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value - self.ptz_metrics[camera].start_time.value}"
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value-self.ptz_metrics[camera].start_time.value}"
)
# save metrics for better estimate calculations
@@ -983,10 +983,10 @@ class PtzAutoTracker:
logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}")
logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}")
logger.debug(
f"{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
)
logger.debug(
f"{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
)
# Zoom in conditions (and)
@@ -1069,7 +1069,7 @@ class PtzAutoTracker:
pan = ((centroid_x / camera_width) - 0.5) * 2
tilt = (0.5 - (centroid_y / camera_height)) * 2
logger.debug(f"{camera}: Original box: {obj.obj_data['box']}")
logger.debug(f'{camera}: Original box: {obj.obj_data["box"]}')
logger.debug(f"{camera}: Predicted box: {tuple(predicted_box)}")
logger.debug(
f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}"
@@ -1179,7 +1179,7 @@ class PtzAutoTracker:
)
zoom = (ratio - 1) / (ratio + 1)
logger.debug(
f"{camera}: limit: {self.tracked_object_metrics[camera]['max_target_box']}, ratio: {ratio} zoom calculation: {zoom}"
f'{camera}: limit: {self.tracked_object_metrics[camera]["max_target_box"]}, ratio: {ratio} zoom calculation: {zoom}'
)
if not result:
# zoom out with special condition if zooming out because of velocity, edges, etc.

View File

@@ -6,7 +6,6 @@ from importlib.util import find_spec
from pathlib import Path
import numpy
import requests
from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import Transport
@@ -49,11 +48,7 @@ class OnvifController:
if cam.onvif.host:
try:
session = requests.Session()
session.verify = not cam.onvif.tls_insecure
transport = Transport(
timeout=10, operation_timeout=10, session=session
)
transport = Transport(timeout=10, operation_timeout=10)
self.cams[cam_name] = {
"onvif": ONVIFCamera(
cam.onvif.host,
@@ -563,26 +558,22 @@ class OnvifController:
if not self._init_onvif(camera_name):
return
try:
if command == OnvifCommandEnum.init:
# already init
return
elif command == OnvifCommandEnum.stop:
self._stop(camera_name)
elif command == OnvifCommandEnum.preset:
self._move_to_preset(camera_name, param)
elif command == OnvifCommandEnum.move_relative:
_, pan, tilt = param.split("_")
self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
elif (
command == OnvifCommandEnum.zoom_in
or command == OnvifCommandEnum.zoom_out
):
self._zoom(camera_name, command)
else:
self._move(camera_name, command)
except ONVIFError as e:
logger.error(f"Unable to handle onvif command: {e}")
if command == OnvifCommandEnum.init:
# already init
return
elif command == OnvifCommandEnum.stop:
self._stop(camera_name)
elif command == OnvifCommandEnum.preset:
self._move_to_preset(camera_name, param)
elif command == OnvifCommandEnum.move_relative:
_, pan, tilt = param.split("_")
self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
elif (
command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out
):
self._zoom(camera_name, command)
else:
self._move(camera_name, command)
def get_camera_info(self, camera_name: str) -> dict[str, any]:
if camera_name not in self.cams.keys():

View File

@@ -29,7 +29,6 @@ from frigate.const import (
RECORD_DIR,
)
from frigate.models import Recordings, ReviewSegment
from frigate.review.types import SeverityEnum
from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__)
@@ -195,7 +194,6 @@ class RecordingMaintainer(threading.Thread):
ReviewSegment.select(
ReviewSegment.start_time,
ReviewSegment.end_time,
ReviewSegment.severity,
ReviewSegment.data,
)
.where(
@@ -221,15 +219,11 @@ class RecordingMaintainer(threading.Thread):
[r for r in recordings_to_insert if r is not None],
)
def drop_segment(self, cache_path: str) -> None:
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
async def validate_and_move_segment(
self, camera: str, reviews: list[ReviewSegment], recording: dict[str, any]
) -> None:
cache_path: str = recording["cache_path"]
start_time: datetime.datetime = recording["start_time"]
cache_path = recording["cache_path"]
start_time = recording["start_time"]
record_config = self.config.cameras[camera].record
# Just delete files if recordings are turned off
@@ -237,7 +231,8 @@ class RecordingMaintainer(threading.Thread):
camera not in self.config.cameras
or not self.config.cameras[camera].record.enabled
):
self.drop_segment(cache_path)
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
return
if cache_path in self.end_time_cache:
@@ -265,34 +260,24 @@ class RecordingMaintainer(threading.Thread):
return
# if cached file's start_time is earlier than the retain days for the camera
# meaning continuous recording is not enabled
if start_time <= (
datetime.datetime.now().astimezone(datetime.timezone.utc)
- datetime.timedelta(days=self.config.cameras[camera].record.retain.days)
):
# if the cached segment overlaps with the review items:
# if the cached segment overlaps with the events:
overlaps = False
for review in reviews:
severity = SeverityEnum[review.severity]
# if the review item starts in the future, stop checking review items
# if the event starts in the future, stop checking events
# and remove this segment
if (
review.start_time - record_config.get_review_pre_capture(severity)
) > end_time.timestamp():
if review.start_time > end_time.timestamp():
overlaps = False
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
break
# if the review item is in progress or ends after the recording starts, keep it
# and stop looking at review items
if (
review.end_time is None
or (
review.end_time
+ record_config.get_review_post_capture(severity)
)
>= start_time.timestamp()
):
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if review.end_time is None or review.end_time >= start_time.timestamp():
overlaps = True
break
@@ -311,7 +296,7 @@ class RecordingMaintainer(threading.Thread):
cache_path,
record_mode,
)
# if it doesn't overlap with an review item, go ahead and drop the segment
# if it doesn't overlap with an event, go ahead and drop the segment
# if it ends more than the configured pre_capture for the camera
else:
camera_info = self.object_recordings_info[camera]
@@ -322,9 +307,9 @@ class RecordingMaintainer(threading.Thread):
most_recently_processed_frame_time - record_config.event_pre_capture
).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff:
self.drop_segment(cache_path)
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
# else retain days includes this segment
# meaning continuous recording is enabled
else:
# assume that empty means the relevant recording info has not been received yet
camera_info = self.object_recordings_info[camera]
@@ -405,7 +390,8 @@ class RecordingMaintainer(threading.Thread):
# check if the segment shouldn't be stored
if segment_info.should_discard_segment(store_mode):
self.drop_segment(cache_path)
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
return
# directory will be in utc due to start_time being in utc
@@ -449,7 +435,7 @@ class RecordingMaintainer(threading.Thread):
return None
else:
logger.debug(
f"Copied {file_path} in {datetime.datetime.now().timestamp() - start_frame} seconds."
f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
)
try:

View File

@@ -256,7 +256,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
segment.detections[object["id"]] = object["sub_label"][0]
else:
segment.detections[object["id"]] = f"{object['label']}-verified"
segment.detections[object["id"]] = f'{object["label"]}-verified'
segment.sub_labels[object["id"]] = object["sub_label"][0]
# if object is alert label
@@ -352,7 +352,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
detections[object["id"]] = object["sub_label"][0]
else:
detections[object["id"]] = f"{object['label']}-verified"
detections[object["id"]] = f'{object["label"]}-verified'
sub_labels[object["id"]] = object["sub_label"][0]
# if object is alert label
@@ -527,9 +527,7 @@ class ReviewSegmentMaintainer(threading.Thread):
if event_id in self.indefinite_events[camera]:
self.indefinite_events[camera].pop(event_id)
if len(self.indefinite_events[camera]) == 0:
current_segment.last_update = manual_info["end_time"]
current_segment.last_update = manual_info["end_time"]
else:
logger.error(
f"Event with ID {event_id} has a set duration and can not be ended manually."

View File

@@ -72,7 +72,8 @@ class BaseServiceProcess(Service, ABC):
running = False
except TimeoutError:
self.manager.logger.warning(
f"{self.name} is still running after {timeout} seconds. Killing."
f"{self.name} is still running after "
f"{timeout} seconds. Killing."
)
if running:

View File

@@ -293,7 +293,7 @@ def stats_snapshot(
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
try:
storage_stats = shutil.disk_usage(path)
except (FileNotFoundError, OSError):
except FileNotFoundError:
stats["service"]["storage"][path] = {}
continue

View File

@@ -75,11 +75,11 @@ class TestConfig(unittest.TestCase):
"detectors": {
"cpu": {
"type": "cpu",
"model_path": "/cpu_model.tflite",
"model": {"path": "/cpu_model.tflite"},
},
"edgetpu": {
"type": "edgetpu",
"model_path": "/edgetpu_model.tflite",
"model": {"path": "/edgetpu_model.tflite"},
},
"openvino": {
"type": "openvino",

View File

@@ -339,7 +339,7 @@ class TrackedObject:
box[2],
box[3],
self.obj_data["label"],
f"{int(self.thumbnail_data['score'] * 100)}% {int(self.thumbnail_data['area'])}",
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
thickness=thickness,
color=color,
)

View File

@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__)
CURRENT_CONFIG_VERSION = "0.15-1"
CURRENT_CONFIG_VERSION = "0.15-0"
DEFAULT_CONFIG_FILE = "/config/config.yml"
@@ -77,13 +77,6 @@ def migrate_frigate_config(config_file: str):
yaml.dump(new_config, f)
previous_version = "0.15-0"
if previous_version < "0.15-1":
logger.info(f"Migrating frigate config from {previous_version} to 0.15-1...")
new_config = migrate_015_1(config)
with open(config_file, "w") as f:
yaml.dump(new_config, f)
previous_version = "0.15-1"
logger.info("Finished frigate config migration...")
@@ -274,21 +267,6 @@ def migrate_015_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
return new_config
def migrate_015_1(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:
"""Handle migrating frigate config to 0.15-1"""
new_config = config.copy()
for detector, detector_config in config.get("detectors", {}).items():
path = detector_config.get("model", {}).get("path")
if path:
new_config["detectors"][detector]["model_path"] = path
del new_config["detectors"][detector]["model"]
new_config["version"] = "0.15-1"
return new_config
def get_relative_coordinates(
mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
) -> Union[str, list]:
@@ -314,7 +292,7 @@ def get_relative_coordinates(
continue
rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)
relative_masks.append(",".join(rel_points))
@@ -337,7 +315,7 @@ def get_relative_coordinates(
return []
rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)
mask = ",".join(rel_points)

View File

@@ -390,22 +390,12 @@ def try_get_info(f, h, default="N/A"):
def get_nvidia_gpu_stats() -> dict[int, dict]:
names: dict[str, int] = {}
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
gpu_name = nvml.nvmlDeviceGetName(handle)
# handle case where user has multiple of same GPU
if gpu_name in names:
names[gpu_name] += 1
gpu_name += f" ({names.get(gpu_name)})"
else:
names[gpu_name] = 1
meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
@@ -433,7 +423,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
dec_util = -1
results[i] = {
"name": gpu_name,
"name": nvml.nvmlDeviceGetName(handle),
"gpu": gpu_util,
"mem": gpu_mem_util,
"enc": enc_util,

View File

@@ -113,7 +113,7 @@ def capture_frames(
fps.value = frame_rate.eps()
skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{config.name}_frame{frame_index}"
frame_name = f"{config.name}_{frame_index}"
frame_buffer = frame_manager.write(frame_name)
try:
frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)

View File

@@ -208,7 +208,7 @@ class ProcessClip:
box[2],
box[3],
obj["id"],
f"{int(obj['score'] * 100)}% {int(obj['area'])}",
f"{int(obj['score']*100)}% {int(obj['area'])}",
thickness=thickness,
color=color,
)
@@ -227,7 +227,7 @@ class ProcessClip:
)
cv2.imwrite(
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time * 1000000)}.jpg",
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
current_frame,
)
@@ -290,7 +290,7 @@ def process(path, label, output, debug_path):
1 for result in results if result[1]["true_positive_objects"] > 0
)
print(
f"Objects were detected in {positive_count}/{len(results)}({positive_count / len(results) * 100:.2f}%) clip(s)."
f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
)
if output:

View File

@@ -755,11 +755,7 @@ export function CameraGroupEdit({
<FormMessage />
{[
...(birdseyeConfig?.enabled ? ["birdseye"] : []),
...Object.keys(config?.cameras ?? {}).sort(
(a, b) =>
(config?.cameras[a]?.ui?.order ?? 0) -
(config?.cameras[b]?.ui?.order ?? 0),
),
...Object.keys(config?.cameras ?? {}),
].map((camera) => (
<FormControl key={camera}>
<FilterSwitch

View File

@@ -477,10 +477,7 @@ export default function ObjectLifecycle({
</p>
{Array.isArray(item.data.box) &&
item.data.box.length >= 4
? (
aspectRatio *
(item.data.box[2] / item.data.box[3])
).toFixed(2)
? (item.data.box[2] / item.data.box[3]).toFixed(2)
: "N/A"}
</div>
</div>

View File

@@ -280,24 +280,12 @@ export default function ReviewDetailDialog({
</div>
{hasMismatch && (
<div className="p-4 text-center text-sm">
{(() => {
const detectedCount = Math.abs(
(events?.length ?? 0) -
(review?.data.detections.length ?? 0),
);
const objectLabel =
detectedCount === 1 ? "object was" : "objects were";
return `${detectedCount} unavailable ${objectLabel} detected and included in this review item.`;
})()}{" "}
Those objects either did not qualify as an alert or detection
or have already been cleaned up/deleted.
Some objects may have been detected in this review item that
did not qualify as an alert or detection. Adjust your
configuration if you want Frigate to save tracked objects for
any missing labels.
{missingObjects.length > 0 && (
<div className="mt-2">
Adjust your configuration if you want Frigate to save
tracked objects for the following labels:{" "}
{missingObjects.join(", ")}
</div>
<div className="mt-2">{missingObjects.join(", ")}</div>
)}
</div>
)}

View File

@@ -469,43 +469,16 @@ function ObjectDetailsTab({
</div>
</div>
<div className="flex flex-col gap-1.5">
{config?.cameras[search.camera].genai.enabled &&
!search.end_time &&
(config.cameras[search.camera].genai.required_zones.length === 0 ||
search.zones.some((zone) =>
config.cameras[search.camera].genai.required_zones.includes(zone),
)) &&
(config.cameras[search.camera].genai.objects.length === 0 ||
config.cameras[search.camera].genai.objects.includes(
search.label,
)) ? (
<>
<div className="text-sm text-primary/40">Description</div>
<div className="flex h-64 flex-col items-center justify-center gap-3 border p-4 text-sm text-primary/40">
<div className="flex">
<ActivityIndicator />
</div>
<div className="flex">
Frigate will not request a description from your Generative AI
provider until the tracked object's lifecycle has ended.
</div>
</div>
</>
) : (
<>
<div className="text-sm text-primary/40">Description</div>
<Textarea
className="h-64"
placeholder="Description of the tracked object"
value={desc}
onChange={(e) => setDesc(e.target.value)}
/>
</>
)}
<div className="text-sm text-primary/40">Description</div>
<Textarea
className="h-64"
placeholder="Description of the tracked object"
value={desc}
onChange={(e) => setDesc(e.target.value)}
/>
<div className="flex w-full flex-row justify-end gap-2">
{config?.cameras[search.camera].genai.enabled && search.end_time && (
<div className="flex items-start">
{config?.cameras[search.camera].genai.enabled && (
<div className="flex items-center">
<Button
className="rounded-r-none border-r-0"
aria-label="Regenerate tracked object description"
@@ -543,16 +516,13 @@ function ObjectDetailsTab({
)}
</div>
)}
{(config?.cameras[search.camera].genai.enabled && search.end_time) ||
(!config?.cameras[search.camera].genai.enabled && (
<Button
variant="select"
aria-label="Save"
onClick={updateDescription}
>
Save
</Button>
))}
<Button
variant="select"
aria-label="Save"
onClick={updateDescription}
>
Save
</Button>
</div>
</div>
</div>

View File

@@ -46,7 +46,7 @@ export default function SearchSettings({
const trigger = (
<Button
className="flex items-center gap-2"
aria-label="Explore Settings"
aria-label="Search Settings"
size="sm"
>
<FaCog className="text-secondary-foreground" />

View File

@@ -328,12 +328,12 @@ export default function Explore() {
<div className="flex max-w-96 flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
<div className="my-5 flex flex-col items-center gap-2 text-xl">
<TbExclamationCircle className="mb-3 size-10" />
<div>Explore is Unavailable</div>
<div>Search Unavailable</div>
</div>
{embeddingsReindexing && allModelsLoaded && (
<>
<div className="text-center text-primary-variant">
Explore can be used after tracked object embeddings have
Search can be used after tracked object embeddings have
finished reindexing.
</div>
<div className="pt-5 text-center">
@@ -384,8 +384,8 @@ export default function Explore() {
<>
<div className="text-center text-primary-variant">
Frigate is downloading the necessary embeddings models to
support the Semantic Search feature. This may take several
minutes depending on the speed of your network connection.
support semantic searching. This may take several minutes
depending on the speed of your network connection.
</div>
<div className="flex w-96 flex-col gap-2 py-5">
<div className="flex flex-row items-center justify-center gap-2">

View File

@@ -40,7 +40,7 @@ import UiSettingsView from "@/views/settings/UiSettingsView";
const allSettingsViews = [
"UI settings",
"explore settings",
"search settings",
"camera settings",
"masks / zones",
"motion tuner",
@@ -175,7 +175,7 @@ export default function Settings() {
</div>
<div className="mt-2 flex h-full w-full flex-col items-start md:h-dvh md:pb-24">
{page == "UI settings" && <UiSettingsView />}
{page == "explore settings" && (
{page == "search settings" && (
<SearchSettingsView setUnsavedChanges={setUnsavedChanges} />
)}
{page == "debug" && (

View File

@@ -142,7 +142,6 @@ export interface CameraConfig {
password: string | null;
port: number;
user: string | null;
tls_insecure: boolean;
};
record: {
enabled: boolean;

View File

@@ -17,12 +17,7 @@ import {
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { TooltipProvider } from "@/components/ui/tooltip";
import { useResizeObserver } from "@/hooks/resize-observer";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
@@ -34,7 +29,6 @@ import {
import { CameraPtzInfo } from "@/types/ptz";
import { RecordingStartingPoint } from "@/types/record";
import React, {
ReactNode,
useCallback,
useEffect,
useMemo,
@@ -524,53 +518,6 @@ export default function LiveCameraView({
);
}
type TooltipButtonProps = {
label: string;
onClick?: () => void;
onMouseDown?: (e: React.MouseEvent) => void;
onMouseUp?: (e: React.MouseEvent) => void;
onTouchStart?: (e: React.TouchEvent) => void;
onTouchEnd?: (e: React.TouchEvent) => void;
children: ReactNode;
className?: string;
};
function TooltipButton({
label,
onClick,
onMouseDown,
onMouseUp,
onTouchStart,
onTouchEnd,
children,
className,
...props
}: TooltipButtonProps) {
return (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<Button
aria-label={label}
onClick={onClick}
onMouseDown={onMouseDown}
onMouseUp={onMouseUp}
onTouchStart={onTouchStart}
onTouchEnd={onTouchEnd}
className={className}
{...props}
>
{children}
</Button>
</TooltipTrigger>
<TooltipContent>
<p>{label}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
);
}
function PtzControlPanel({
camera,
clickOverlay,
@@ -664,8 +611,8 @@ function PtzControlPanel({
>
{ptz?.features?.includes("pt") && (
<>
<TooltipButton
label="Move camera left"
<Button
aria-label="Move PTZ camera to the left"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("MOVE_LEFT");
@@ -678,9 +625,9 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<FaAngleLeft />
</TooltipButton>
<TooltipButton
label="Move camera up"
</Button>
<Button
aria-label="Move PTZ camera up"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("MOVE_UP");
@@ -693,9 +640,9 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<FaAngleUp />
</TooltipButton>
<TooltipButton
label="Move camera down"
</Button>
<Button
aria-label="Move PTZ camera down"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("MOVE_DOWN");
@@ -708,9 +655,9 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<FaAngleDown />
</TooltipButton>
<TooltipButton
label="Move camera right"
</Button>
<Button
aria-label="Move PTZ camera to the right"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("MOVE_RIGHT");
@@ -723,13 +670,13 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<FaAngleRight />
</TooltipButton>
</Button>
</>
)}
{ptz?.features?.includes("zoom") && (
<>
<TooltipButton
label="Zoom in"
<Button
aria-label="Zoom PTZ camera in"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("ZOOM_IN");
@@ -742,9 +689,9 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<MdZoomIn />
</TooltipButton>
<TooltipButton
label="Zoom out"
</Button>
<Button
aria-label="Zoom PTZ camera out"
onMouseDown={(e) => {
e.preventDefault();
sendPtz("ZOOM_OUT");
@@ -757,60 +704,45 @@ function PtzControlPanel({
onTouchEnd={onStop}
>
<MdZoomOut />
</TooltipButton>
</Button>
</>
)}
{ptz?.features?.includes("pt-r-fov") && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<Button
className={`${clickOverlay ? "text-selected" : "text-primary"}`}
aria-label="Click in the frame to center the camera"
onClick={() => setClickOverlay(!clickOverlay)}
>
<TbViewfinder />
</Button>
</TooltipTrigger>
<TooltipContent>
<p>{clickOverlay ? "Disable" : "Enable"} click to move</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
<>
<Button
className={`${clickOverlay ? "text-selected" : "text-primary"}`}
aria-label="Click in the frame to center the PTZ camera"
onClick={() => setClickOverlay(!clickOverlay)}
>
<TbViewfinder />
</Button>
</>
)}
{(ptz?.presets?.length ?? 0) > 0 && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<DropdownMenu modal={!isDesktop}>
<DropdownMenuTrigger asChild>
<Button aria-label="PTZ camera presets">
<BsThreeDotsVertical />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent
className="scrollbar-container max-h-[40dvh] overflow-y-auto"
onCloseAutoFocus={(e) => e.preventDefault()}
<DropdownMenu modal={!isDesktop}>
<DropdownMenuTrigger asChild>
<Button aria-label="PTZ camera presets">
<BsThreeDotsVertical />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent
className="scrollbar-container max-h-[40dvh] overflow-y-auto"
onCloseAutoFocus={(e) => e.preventDefault()}
>
{ptz?.presets.map((preset) => {
return (
<DropdownMenuItem
key={preset}
aria-label={preset}
className="cursor-pointer"
onSelect={() => sendPtz(`preset_${preset}`)}
>
{ptz?.presets.map((preset) => (
<DropdownMenuItem
key={preset}
aria-label={preset}
className="cursor-pointer"
onSelect={() => sendPtz(`preset_${preset}`)}
>
{preset}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
</TooltipTrigger>
<TooltipContent>
<p>PTZ camera presets</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
{preset}
</DropdownMenuItem>
);
})}
</DropdownMenuContent>
</DropdownMenu>
)}
</div>
);

View File

@@ -91,7 +91,7 @@ export default function SearchSettingsView({
)
.then((res) => {
if (res.status === 200) {
toast.success("Explore settings have been saved.", {
toast.success("Search settings have been saved.", {
position: "top-center",
});
setChangedValue(false);
@@ -128,7 +128,7 @@ export default function SearchSettingsView({
if (changedValue) {
addMessage(
"search_settings",
`Unsaved Explore settings changes`,
`Unsaved search settings changes`,
undefined,
"search_settings",
);
@@ -140,7 +140,7 @@ export default function SearchSettingsView({
}, [changedValue]);
useEffect(() => {
document.title = "Explore Settings - Frigate";
document.title = "Search Settings - Frigate";
}, []);
if (!config) {
@@ -152,7 +152,7 @@ export default function SearchSettingsView({
<Toaster position="top-center" closeButton={true} />
<div className="scrollbar-container order-last mb-10 mt-2 flex h-full w-full flex-col overflow-y-auto rounded-lg border-[1px] border-secondary-foreground bg-background_alt p-2 md:order-none md:mb-0 md:mr-2 md:mt-0">
<Heading as="h3" className="my-2">
Explore Settings
Search Settings
</Heading>
<Separator className="my-2 flex bg-secondary" />
<Heading as="h4" className="my-2">
@@ -221,7 +221,7 @@ export default function SearchSettingsView({
<div className="text-md">Model Size</div>
<div className="space-y-1 text-sm text-muted-foreground">
<p>
The size of the model used for Semantic Search embeddings.
The size of the model used for semantic search embeddings.
</p>
<ul className="list-disc pl-5 text-sm">
<li>