Compare commits

..

19 Commits

Author SHA1 Message Date
Blake Blackshear
27a31e731f install i965-va-driver-shaders (#5451) 2023-02-11 09:57:15 -06:00
Nicolas Mowen
562e2627c2 Auto select gpu for hwaccel presets (#5406)
* Add ability for GPU device to be automatically detected when multiple exist

* Add logging info

* Fix access

* Fix

* Formatting

* Fix path of device

* Use log error instead of raise

* Remove log which could apply to other cases

* Set default value

* rework logic and support auto gpu selection for encoding gpu as well
2023-02-11 08:00:58 -06:00
Felipe Santos
babd976533 Bump go2rtc from 1.1.1 to 1.1.2 (#5440)
* Bump go2rtc from 1.1.1 to 1.1.2

* Update go2rtc version in docs
2023-02-09 09:32:46 -06:00
Felipe Santos
748815b6ce Fix logging IP instead of WebRTC port (#5417) 2023-02-08 21:28:20 -06:00
Nicolas Mowen
88252e0ae6 Update ffmpeg_presets.py (#5433) 2023-02-08 21:27:43 -06:00
Nicolas Mowen
c0bf69b4bf add note about go2rtc webui (#5430) 2023-02-08 21:26:38 -06:00
Nicolas Mowen
b6b10e753f Update docs for H265 (#5398)
* Update hwaccel docs for h265

* Update camera specific

* Update hardware_acceleration.md

* Update hardware_acceleration.md
2023-02-06 07:54:01 -06:00
Nicolas Mowen
4a45089b95 Scrollbar juddering (#5383)
* added scrollbar width to observer width.

* subtract scrollBarWidth from scaledWidth

* useMemo dependencies

---------

Co-authored-by: Bernt Christian Egeland <cbegelan@gmail.com>
2023-02-05 09:13:15 -06:00
Blake Blackshear
3b9bcb356b update presets docs (#5386) 2023-02-05 09:12:40 -06:00
Blake Blackshear
e10ddb343c additional shutdown optimizations (#5380) 2023-02-04 08:58:45 -06:00
Blake Blackshear
e8cd25ddf2 Docs tweaks (#5379)
* add note about cameras needing ffmpeg for go2rtc

* clarify error message on GPU stats
2023-02-04 08:47:27 -06:00
Blake Blackshear
624c314335 Fast restart (#5378)
* dont wait so long for queues

* implement stop methods for comms

* set the detection events on exit and return early from processing

* handle the stop event in the broadcast threads

* short circuit the detection process exit code if it already exited

* some logging for stats thread

* just keep the log process alive 1 second after the last log message

* ensure the multiprocessing queues are emptied and closed

* Update frigate/log.py

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>

* Update frigate/log.py

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>

* mypy fixes

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2023-02-03 20:15:47 -06:00
Anil Ozyalcin
b33094207c YOLOX support for OpenVINO Detector (#5285)
* Initial commit to enable Yolox models with OpenVINO in Frigate

* Fix ModelEnumtType import error in openvino.py

* Initial edit of the docs to include verbiage about yolox

* Initial edit of the docs to include verbiage about yolox

* Elaborate configuration and limitations in docs.

* Add capability to dynamically determine number of classes in yolox model

* Further refinements

* Removed unnecessary comments, improved documentation, addressed PR items

* Fixed lint formatting issues
2023-02-03 19:36:37 -06:00
jvrobert
7083a5c9b6 Try to limit nvidia GPU queries to included GPUs (#5356)
* Try to limit nvidia GPU queries to included GPUs

* ignore non digit GPU indexes

* formatting

* Formatting

* Remove trailing spaces

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2023-02-03 19:34:07 -06:00
Nicolas Mowen
db131d4971 Add Low latency preset (#5367)
* Create new low latency preset

* Update chart for new preset
2023-02-03 19:33:25 -06:00
Nicolas Mowen
74d6ab0555 Fix copying logs (#5373) 2023-02-03 19:32:31 -06:00
Nicolas Mowen
66881eb89f Add examples of intel inference times from survey answers (#5332) 2023-02-01 18:10:24 -06:00
Nicolas Mowen
ad60f4894b Update go2rtc to 1.1.1 (#5333)
* Update go2rtc to 1.1.1

* Remove redundant qualifiers

* Remove qualifiers

* Set default_query

* Update version
2023-02-01 18:09:56 -06:00
James L
8d21c950a3 Name capture processes (#5340) 2023-02-01 17:49:18 -06:00
31 changed files with 401 additions and 92 deletions

View File

@@ -27,7 +27,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
 FROM wget AS go2rtc
 ARG TARGETARCH
 WORKDIR /rootfs/usr/local/go2rtc/bin
-RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.1.0/go2rtc_linux_${TARGETARCH}" \
+RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.1.2/go2rtc_linux_${TARGETARCH}" \
     && chmod +x go2rtc

View File

@@ -64,6 +64,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
 apt-get -qq install --no-install-recommends --no-install-suggests -y \
     intel-opencl-icd \
     mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools
+# something about this dependency requires it to be installed in a separate call rather than in the line above
+apt-get -qq install --no-install-recommends --no-install-suggests -y \
+    i965-va-driver-shaders
 rm -f /etc/apt/sources.list.d/debian-testing.list
 fi

View File

@@ -35,7 +35,7 @@ function get_ip_and_port_from_supervisor() {
 jq --exit-status --raw-output '.data.network["8555/tcp"]'
 ) && [[ "${webrtc_port}" =~ ${port_regex} ]]; then
     webrtc_port="${BASH_REMATCH[1]}"
-    echo "[INFO] Got WebRTC port from supervisor: ${ip_address}" >&2
+    echo "[INFO] Got WebRTC port from supervisor: ${webrtc_port}" >&2
 else
     echo "[WARN] Failed to get WebRTC port from supervisor" >&2
     return 0

View File

@@ -44,6 +44,14 @@ if not go2rtc_config.get("webrtc", {}).get("candidates", []):
 else:
     print("[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually", file=sys.stderr)
+# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
+# this means user does not need to specify audio codec when using restream
+# as source for frigate and the integration supports HLS playback
+if go2rtc_config.get("rtsp") is None:
+    go2rtc_config["rtsp"] = {"default_query": "mp4"}
+elif go2rtc_config["rtsp"].get("default_query") is None:
+    go2rtc_config["rtsp"]["default_query"] = "mp4"
 # need to replace ffmpeg command when using ffmpeg4
 if not os.path.exists(BTBN_PATH):
     if go2rtc_config.get("ffmpeg") is None:

View File

@@ -101,7 +101,7 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor
 OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html)
-An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector.
+An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
 ```yaml
 detectors:
@@ -119,6 +119,25 @@ model:
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 ```
+This detector also supports YOLOx models, and has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. Frigate does not come with a `yolox_tiny` model, so you will need to follow the [OpenVINO documentation](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) to provide your own model to Frigate. There is currently no support for other types of YOLO models (YOLOv3, YOLOv4, etc...). Below is an example of how `yolox_tiny` and other yolox variants can be used in Frigate:
+```yaml
+detectors:
+  ov:
+    type: openvino
+    device: AUTO
+    model:
+      path: /path/to/yolox_tiny.xml
+
+model:
+  width: 416
+  height: 416
+  input_tensor: nchw
+  input_pixel_format: bgr
+  model_type: yolox
+  labelmap_path: /path/to/coco_80cl.txt
+```
 ### Intel NCS2 VPU and Myriad X Setup
 Intel produces a neural net inference accelleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for accelleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.

View File

@@ -29,13 +29,14 @@ Input args presets help make the config more readable and handle use cases for d
 See [the camera specific docs](/configuration/camera_specific.md) for more info on non-standard cameras and recommendations for using them in Frigate.
 | Preset | Usage | Other Notes |
-| ------------------------- | ------------------------- | --------------------------------------------------- |
+| -------------------------------- | ------------------------- | ------------------------------------------------------------------------------------------------ |
 | preset-http-jpeg-generic | HTTP Live Jpeg | Recommend restreaming live jpeg instead |
 | preset-http-mjpeg-generic | HTTP Mjpeg Stream | Recommend restreaming mjpeg stream instead |
 | preset-http-reolink | Reolink HTTP-FLV Stream | Only for reolink http, not when restreaming as rtsp |
 | preset-rtmp-generic | RTMP Stream | |
 | preset-rtsp-generic | RTSP Stream | This is the default when nothing is specified |
-| preset-rtsp-restream | RTSP Stream from restream | Use when using rtsp restream as source |
+| preset-rtsp-restream | RTSP Stream from restream | Use for rtsp restream as source for frigate |
+| preset-rtsp-restream-low-latency | RTSP Stream from restream | Use for rtsp restream as source for frigate to lower latency, may cause issues with some cameras |
 | preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only |
 | preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris |
@@ -46,21 +47,22 @@ It is important to be mindful of input args when using restream because you can
 :::
 ```yaml
+go2rtc:
+  streams:
+    reolink_cam: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password=password
 cameras:
   reolink_cam:
     ffmpeg:
       inputs:
-        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
+        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password=password
           input_args: preset-http-reolink
           roles:
             - detect
-        - path: rtsp://192.168.0.10:8554/garage
+        - path: rtsp://127.0.0.1:8554/reolink_cam
          input_args: preset-rtsp-generic
          roles:
            - record
-        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
-          roles:
-            - restream
 ```
### Output Args Presets ### Output Args Presets

View File

@@ -15,23 +15,39 @@ ffmpeg:
 hwaccel_args: preset-rpi-64-h264
 ```
-### Intel-based CPUs (<10th Generation) via Quicksync
+### Intel-based CPUs (<10th Generation) via VAAPI
+VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work.
 ```yaml
 ffmpeg:
   hwaccel_args: preset-vaapi
 ```
 **NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the frigate.yml for HA OS users](advanced.md#environment_vars).
 ### Intel-based CPUs (>=10th Generation) via Quicksync
+QSV must be set specifically based on the video encoding of the stream.
+#### H.264 streams
 ```yaml
 ffmpeg:
   hwaccel_args: preset-intel-qsv-h264
 ```
+#### H.265 streams
+```yaml
+ffmpeg:
+  hwaccel_args: preset-intel-qsv-h265
+```
 ### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
+VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
 **Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container.
 ```yaml

View File

@@ -105,6 +105,9 @@ model:
 # Optional: Object detection model input tensor format
 # Valid values are nhwc or nchw (default: shown below)
 input_tensor: nhwc
+# Optional: Object detection model type, currently only used with the OpenVINO detector
+# Valid values are ssd or yolox (default: shown below)
+model_type: ssd
 # Optional: Label name modifications. These are merged into the standard labelmap.
 labelmap:
   2: vehicle
@@ -350,7 +353,7 @@ rtmp:
 enabled: False
 # Optional: Restream configuration
-# Uses https://github.com/AlexxIT/go2rtc (v1.0.1)
+# Uses https://github.com/AlexxIT/go2rtc (v1.1.2)
 go2rtc:
 # Optional: jsmpeg stream configuration for WebUI

View File

@@ -50,11 +50,11 @@ cameras:
 output_args:
   record: preset-record-generic-audio-copy
 inputs:
-  - path: rtsp://127.0.0.1:8554/test_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
+  - path: rtsp://127.0.0.1:8554/test_cam # <--- the name here must match the name of the camera in restream
     input_args: preset-rtsp-restream
     roles:
       - record
-  - path: rtsp://127.0.0.1:8554/test_cam_sub?video=copy # <--- the name here must match the name of the camera_sub in restream
+  - path: rtsp://127.0.0.1:8554/test_cam_sub # <--- the name here must match the name of the camera_sub in restream
     input_args: preset-rtsp-restream
     roles:
       - detect

View File

@@ -9,6 +9,12 @@ Frigate can restream your video feed as an RTSP feed for other applications such
 Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc#configuration) for more advanced configurations and features.
+:::note
+You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams.
+:::
 ### Birdseye Restream
 Birdseye RTSP restream can be enabled at `birdseye -> restream` and accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the restream will cause birdseye to run 24/7 which may increase CPU usage somewhat.
@@ -56,7 +62,7 @@ cameras:
 output_args:
   record: preset-record-generic-audio-copy
 inputs:
-  - path: rtsp://127.0.0.1:8554/rtsp_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
+  - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream
     input_args: preset-rtsp-restream
     roles:
       - record
@@ -66,7 +72,7 @@ cameras:
 output_args:
   record: preset-record-generic-audio-copy
 inputs:
-  - path: rtsp://127.0.0.1:8554/http_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
+  - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream
     input_args: preset-rtsp-restream
     roles:
       - record
@@ -99,11 +105,11 @@ cameras:
 output_args:
   record: preset-record-generic-audio-copy
 inputs:
-  - path: rtsp://127.0.0.1:8554/rtsp_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
+  - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream
     input_args: preset-rtsp-restream
     roles:
       - record
-  - path: rtsp://127.0.0.1:8554/rtsp_cam_sub?video=copy&audio=aac # <--- the name here must match the name of the camera_sub in restream
+  - path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream
     input_args: preset-rtsp-restream
     roles:
       - detect
@@ -112,11 +118,11 @@ cameras:
 output_args:
   record: preset-record-generic-audio-copy
 inputs:
-  - path: rtsp://127.0.0.1:8554/http_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
+  - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream
     input_args: preset-rtsp-restream
     roles:
       - record
-  - path: rtsp://127.0.0.1:8554/http_cam_sub?video=copy&audio=aac # <--- the name here must match the name of the camera_sub in restream
+  - path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream
     input_args: preset-rtsp-restream
     roles:
       - detect

View File

@@ -3,7 +3,7 @@ id: camera_setup
 title: Camera setup
 ---
-Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but far less compatibility. Safari and Edge are the only browsers able to play H.265. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
+Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
 - **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server.

View File

@@ -58,11 +58,20 @@ More information is available [in the detector docs](/configuration/detectors#op
 Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known examples are below:
 | Name | Inference Speed | Notes |
-| ------------------- | --------------- | --------------------------------------------------------------------- |
+| -------------------- | --------------- | --------------------------------------------------------------------- |
-| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were ~ 150 ms |
-| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
 | Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
+| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were 150 - 200 ms |
+| Intel Celeron N3060 | 130 - 150 ms | Inference speeds on CPU were ~ 550 ms |
+| Intel Celeron N3205U | ~ 120 ms | Inference speeds on CPU were ~ 380 ms |
+| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
+| Intel i3 6100T | 15 - 35 ms | Inference speeds on CPU were 60 - 120 ms |
+| Intel i3 8100 | ~ 15 ms | Inference speeds on CPU were ~ 65 ms |
+| Intel i5 4590 | ~ 20 ms | Inference speeds on CPU were ~ 230 ms |
+| Intel i5 6500 | ~ 15 ms | Inference speeds on CPU were ~ 150 ms |
+| Intel i5 7200u | 15 - 25 ms | Inference speeds on CPU were ~ 150 ms |
+| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
 | Intel i5 1135G7 | 10 - 15 ms | |
+| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
 ### TensorRT

View File

@@ -23,7 +23,7 @@ The easiest live view to get working is MSE. After adding this to the config, re
 ### What if my video doesn't play?
-If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
+If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
 ```yaml
 go2rtc:
@@ -33,6 +33,15 @@ go2rtc:
- "ffmpeg:back#video=h264" - "ffmpeg:back#video=h264"
``` ```
Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types.
```yaml
go2rtc:
streams:
back:
- ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```
If you can see the video but do not have audio, this is most likely because your camera's audio stream is not AAC. If possible, update your camera's audio settings to AAC. If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows: If you can see the video but do not have audio, this is most likely because your camera's audio stream is not AAC. If possible, update your camera's audio settings to AAC. If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:
```yaml ```yaml
@@ -53,6 +62,15 @@ go2rtc:
- "ffmpeg:back#video=h264#audio=aac" - "ffmpeg:back#video=h264#audio=aac"
``` ```
When using the ffmpeg module, you would add AAC audio like this:
```yaml
go2rtc:
streams:
back:
- "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac"
```
## Next steps ## Next steps
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera). 1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).

View File

@@ -183,8 +183,7 @@ class FrigateApp:
 if self.config.mqtt.enabled:
     comms.append(MqttClient(self.config))
-self.ws_client = WebSocketClient(self.config)
-comms.append(self.ws_client)
+comms.append(WebSocketClient(self.config))
 self.dispatcher = Dispatcher(self.config, self.camera_metrics, comms)
 def start_detectors(self) -> None:
@@ -417,7 +416,17 @@ class FrigateApp:
logger.info(f"Stopping...") logger.info(f"Stopping...")
self.stop_event.set() self.stop_event.set()
self.ws_client.stop() for detector in self.detectors.values():
detector.stop()
# Empty the detection queue and set the events for all requests
while not self.detection_queue.empty():
connection_id = self.detection_queue.get(timeout=1)
self.detection_out_events[connection_id].set()
self.detection_queue.close()
self.detection_queue.join_thread()
self.dispatcher.stop()
self.detected_frames_processor.join() self.detected_frames_processor.join()
self.event_processor.join() self.event_processor.join()
self.event_cleanup.join() self.event_cleanup.join()
@@ -427,10 +436,20 @@ class FrigateApp:
 self.frigate_watchdog.join()
 self.db.stop()
-for detector in self.detectors.values():
-    detector.stop()
 while len(self.detection_shms) > 0:
     shm = self.detection_shms.pop()
     shm.close()
     shm.unlink()
+
+for queue in [
+    self.event_queue,
+    self.event_processed_queue,
+    self.video_output_queue,
+    self.detected_frames_queue,
+    self.recordings_info_queue,
+    self.log_queue,
+]:
+    while not queue.empty():
+        queue.get_nowait()
+    queue.close()
+    queue.join_thread()
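The drain-before-`close()` dance matters because `multiprocessing.Queue` flushes items through a background feeder thread, and `join_thread()` waits for that thread; leftover items can otherwise stall shutdown. A standalone sketch of the same pattern (toy queue, not Frigate's):

```python
import multiprocessing as mp

def drain_and_close(q) -> None:
    # Empty the queue so the feeder thread has nothing left to flush,
    # then close it and wait for the feeder thread to exit.
    while not q.empty():
        q.get_nowait()
    q.close()
    q.join_thread()

if __name__ == "__main__":
    q = mp.Queue()
    q.put("leftover item")
    drain_and_close(q)  # returns promptly instead of hanging at exit
```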

View File

@@ -27,6 +27,11 @@ class Communicator(ABC):
"""Pass receiver so communicators can pass commands.""" """Pass receiver so communicators can pass commands."""
pass pass
@abstractmethod
def stop(self) -> None:
"""Stop the communicator."""
pass
class Dispatcher: class Dispatcher:
"""Handle communication between Frigate and communicators.""" """Handle communication between Frigate and communicators."""
@@ -72,6 +77,10 @@ class Dispatcher:
 for comm in self.comms:
     comm.publish(topic, payload, retain)
+def stop(self) -> None:
+    for comm in self.comms:
+        comm.stop()
 def _on_detect_command(self, camera_name: str, payload: str) -> None:
     """Callback for detect topic."""
     detect_settings = self.config.cameras[camera_name].detect
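With `stop()` now part of the abstract interface, every communicator must implement it so `Dispatcher.stop()` can fan out cleanly. A hypothetical minimal implementation, assuming the `publish`/`subscribe`/`stop` shape shown above (class name, import path, and print-based behavior are illustrative, not part of Frigate):

```python
from frigate.comms.dispatcher import Communicator  # import path assumed

class LogCommunicator(Communicator):
    """Toy communicator that prints instead of talking to a broker."""

    def publish(self, topic: str, payload, retain: bool = False) -> None:
        print(f"publish {topic}: {payload} (retain={retain})")

    def subscribe(self, receiver) -> None:
        # Dispatcher passes itself in so commands can be routed back.
        self._receiver = receiver

    def stop(self) -> None:
        # Release any sockets/threads here; Dispatcher.stop() calls this.
        print("log communicator stopped")
```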

View File

@@ -35,6 +35,9 @@ class MqttClient(Communicator): # type: ignore[misc]
f"{self.mqtt_config.topic_prefix}/{topic}", payload, retain=retain f"{self.mqtt_config.topic_prefix}/{topic}", payload, retain=retain
) )
def stop(self) -> None:
self.client.disconnect()
def _set_initial_topics(self) -> None: def _set_initial_topics(self) -> None:
"""Set initial state topics.""" """Set initial state topics."""
for camera_name, camera in self.config.cameras.items(): for camera_name, camera in self.config.cameras.items():

View File

@@ -95,3 +95,4 @@ class WebSocketClient(Communicator): # type: ignore[misc]
 self.websocket_server.manager.join()
 self.websocket_server.shutdown()
 self.websocket_thread.join()
+logger.info("Exiting websocket client...")

View File

@@ -23,6 +23,11 @@ class InputTensorEnum(str, Enum):
nhwc = "nhwc" nhwc = "nhwc"
class ModelTypeEnum(str, Enum):
ssd = "ssd"
yolox = "yolox"
class ModelConfig(BaseModel): class ModelConfig(BaseModel):
path: Optional[str] = Field(title="Custom Object detection model path.") path: Optional[str] = Field(title="Custom Object detection model path.")
labelmap_path: Optional[str] = Field(title="Label map for custom object detector.") labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
@@ -37,6 +42,9 @@ class ModelConfig(BaseModel):
 input_pixel_format: PixelFormatEnum = Field(
     default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
 )
+model_type: ModelTypeEnum = Field(
+    default=ModelTypeEnum.ssd, title="Object Detection Model Type"
+)
 _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
 _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()

View File

@@ -3,7 +3,7 @@ import numpy as np
 import openvino.runtime as ov
 from frigate.detectors.detection_api import DetectionApi
-from frigate.detectors.detector_config import BaseDetectorConfig
+from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 from typing import Literal
 from pydantic import Extra, Field
@@ -24,12 +24,18 @@ class OvDetector(DetectionApi):
 def __init__(self, detector_config: OvDetectorConfig):
     self.ov_core = ov.Core()
     self.ov_model = self.ov_core.read_model(detector_config.model.path)
+    self.ov_model_type = detector_config.model.model_type
+    self.h = detector_config.model.height
+    self.w = detector_config.model.width
     self.interpreter = self.ov_core.compile_model(
         model=self.ov_model, device_name=detector_config.device
     )
     logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
     self.output_indexes = 0
     while True:
         try:
             tensor_shape = self.interpreter.output(self.output_indexes).shape
@@ -38,12 +44,34 @@ class OvDetector(DetectionApi):
         except:
             logger.info(f"Model has {self.output_indexes} Output Tensors")
             break
+    if self.ov_model_type == ModelTypeEnum.yolox:
+        self.num_classes = tensor_shape[2] - 5
+        logger.info(f"YOLOX model has {self.num_classes} classes")
+        self.set_strides_grids()
+
+def set_strides_grids(self):
+    grids = []
+    expanded_strides = []
+    strides = [8, 16, 32]
+    hsizes = [self.h // stride for stride in strides]
+    wsizes = [self.w // stride for stride in strides]
+    for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+        grids.append(grid)
+        shape = grid.shape[:2]
+        expanded_strides.append(np.full((*shape, 1), stride))
+    self.grids = np.concatenate(grids, 1)
+    self.expanded_strides = np.concatenate(expanded_strides, 1)
 def detect_raw(self, tensor_input):
     infer_request = self.interpreter.create_infer_request()
     infer_request.infer([tensor_input])
+    if self.ov_model_type == ModelTypeEnum.ssd:
         results = infer_request.get_output_tensor()
         detections = np.zeros((20, 6), np.float32)
@@ -62,5 +90,46 @@ class OvDetector(DetectionApi):
             object_detected[5],  # x_max
         ]
         i += 1
+        return detections
+    elif self.ov_model_type == ModelTypeEnum.yolox:
+        out_tensor = infer_request.get_output_tensor()
+        # [x, y, h, w, box_score, class_no_1, ..., class_no_80],
+        results = out_tensor.data
+        results[..., :2] = (results[..., :2] + self.grids) * self.expanded_strides
+        results[..., 2:4] = np.exp(results[..., 2:4]) * self.expanded_strides
+        image_pred = results[0, ...]
+        class_conf = np.max(
+            image_pred[:, 5 : 5 + self.num_classes], axis=1, keepdims=True
+        )
+        class_pred = np.argmax(image_pred[:, 5 : 5 + self.num_classes], axis=1)
+        class_pred = np.expand_dims(class_pred, axis=1)
+        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
+        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
+        dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
+        dets = dets[conf_mask]
+        ordered = dets[dets[:, 5].argsort()[::-1]][:20]
+        detections = np.zeros((20, 6), np.float32)
+        i = 0
+        for object_detected in ordered:
+            if i < 20:
+                detections[i] = [
+                    object_detected[6],  # Label ID
+                    object_detected[5],  # Confidence
+                    (object_detected[1] - (object_detected[3] / 2)) / self.h,  # y_min
+                    (object_detected[0] - (object_detected[2] / 2)) / self.w,  # x_min
+                    (object_detected[1] + (object_detected[3] / 2)) / self.h,  # y_max
+                    (object_detected[0] + (object_detected[2] / 2)) / self.w,  # x_max
+                ]
+                i += 1
+            else:
+                break
     return detections
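`set_strides_grids` exists to undo YOLOX's grid-relative box encoding: each raw (x, y) is an offset inside a feature-map cell, so adding the cell's grid coordinate and multiplying by that map's stride recovers input-pixel coordinates (and `exp` of the raw w/h is scaled the same way). A standalone sketch of that arithmetic on a toy prediction, assuming a 416x416 input and the strides above:

```python
import numpy as np

strides = [8, 16, 32]
h = w = 416
grids, expanded_strides = [], []
for stride in strides:
    hsize, wsize = h // stride, w // stride
    xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
    grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
    grids.append(grid)
    expanded_strides.append(np.full((1, grid.shape[1], 1), stride))
grids = np.concatenate(grids, 1)
expanded_strides = np.concatenate(expanded_strides, 1)

# A raw (x, y) of (0.5, 0.5) in cell (x=10, y=4) of the stride-8 map decodes
# to the pixel center ((10 + 0.5) * 8, (4 + 0.5) * 8) = (84.0, 36.0).
raw = np.zeros((1, grids.shape[1], 2), np.float32)
idx = 4 * (w // 8) + 10  # row-major index of cell (x=10, y=4)
raw[0, idx] = 0.5
xy = (raw + grids) * expanded_strides
assert tuple(xy[0, idx]) == (84.0, 36.0)
```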

View File

@@ -67,7 +67,7 @@ class EventProcessor(threading.Thread):
 while not self.stop_event.is_set():
     try:
-        event_type, camera, event_data = self.event_queue.get(timeout=10)
+        event_type, camera, event_data = self.event_queue.get(timeout=1)
     except queue.Empty:
         continue

View File

@@ -1,14 +1,52 @@
"""Handles inserting and maintaining ffmpeg presets.""" """Handles inserting and maintaining ffmpeg presets."""
import logging
import os import os
from typing import Any from typing import Any
from frigate.version import VERSION from frigate.version import VERSION
from frigate.const import BTBN_PATH from frigate.const import BTBN_PATH
from frigate.util import vainfo_hwaccel
logger = logging.getLogger(__name__)
class LibvaGpuSelector:
"Automatically selects the correct libva GPU."
_selected_gpu = None
def get_selected_gpu(self) -> str:
"""Get selected libva GPU."""
if not os.path.exists("/dev/dri"):
return ""
if self._selected_gpu:
return self._selected_gpu
devices = list(filter(lambda d: d.startswith("render"), os.listdir("/dev/dri")))
if len(devices) < 2:
self._selected_gpu = "/dev/dri/renderD128"
return self._selected_gpu
for device in devices:
check = vainfo_hwaccel(device_name=device)
logger.debug(f"{device} return vainfo status code: {check.returncode}")
if check.returncode == 0:
self._selected_gpu = f"/dev/dri/{device}"
return self._selected_gpu
return ""
TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout" TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
_gpu_selector = LibvaGpuSelector()
_user_agent_args = [ _user_agent_args = [
"-user_agent", "-user_agent",
f"FFmpeg Frigate/{VERSION}", f"FFmpeg Frigate/{VERSION}",
@@ -23,7 +61,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel", "-hwaccel",
"vaapi", "vaapi",
"-hwaccel_device", "-hwaccel_device",
"/dev/dri/renderD128", _gpu_selector.get_selected_gpu(),
"-hwaccel_output_format", "-hwaccel_output_format",
"vaapi", "vaapi",
], ],
@@ -31,7 +69,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel", "-hwaccel",
"qsv", "qsv",
"-qsv_device", "-qsv_device",
"/dev/dri/renderD128", _gpu_selector.get_selected_gpu(),
"-hwaccel_output_format", "-hwaccel_output_format",
"qsv", "qsv",
"-c:v", "-c:v",
@@ -43,7 +81,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel", "-hwaccel",
"qsv", "qsv",
"-qsv_device", "-qsv_device",
"/dev/dri/renderD128", _gpu_selector.get_selected_gpu(),
"-hwaccel_output_format", "-hwaccel_output_format",
"qsv", "qsv",
"-c:v", "-c:v",
@@ -95,6 +133,7 @@ PRESETS_HW_ACCEL_SCALE = {
 PRESETS_HW_ACCEL_ENCODE = {
     "preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
     "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
+    "preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
     "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
     "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
     "preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
@@ -142,6 +181,7 @@ def parse_preset_hardware_acceleration_encode(arg: Any, input: str, output: str)
 return PRESETS_HW_ACCEL_ENCODE.get(arg, PRESETS_HW_ACCEL_ENCODE["default"]).format(
     input,
     output,
+    _gpu_selector.get_selected_gpu(),
 )
@@ -231,6 +271,13 @@ PRESETS_INPUT = {
"1", "1",
], ],
"preset-rtsp-restream": _user_agent_args "preset-rtsp-restream": _user_agent_args
+ [
"-rtsp_transport",
"tcp",
TIMEOUT_PARAM,
"5000000",
],
"preset-rtsp-restream-low-latency": _user_agent_args
+ [ + [
"-rtsp_transport", "-rtsp_transport",
"tcp", "tcp",

View File

@@ -2,11 +2,16 @@
 import logging
 import threading
 import os
+import signal
 import queue
+import multiprocessing as mp
 from multiprocessing.queues import Queue
 from logging import handlers
-from typing import Optional
-from types import FrameType
 from setproctitle import setproctitle
-from typing import Deque
+from typing import Deque, Optional
+from types import FrameType
 from collections import deque
 from frigate.util import clean_camera_user_pass
@@ -34,10 +39,21 @@ def log_process(log_queue: Queue) -> None:
 threading.current_thread().name = f"logger"
 setproctitle("frigate.logger")
 listener_configurer()
+
+stop_event = mp.Event()
+
+def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
+    stop_event.set()
+
+signal.signal(signal.SIGTERM, receiveSignal)
+signal.signal(signal.SIGINT, receiveSignal)
+
 while True:
     try:
-        record = log_queue.get(timeout=5)
+        record = log_queue.get(timeout=1)
     except (queue.Empty, KeyboardInterrupt):
+        if stop_event.is_set():
+            break
         continue
     logger = logging.getLogger(record.name)
     logger.handle(record)

View File

@@ -88,6 +88,7 @@ def run_detector(
 stop_event = mp.Event()
 def receiveSignal(signalNumber, frame):
+    logger.info("Signal to exit detection process...")
     stop_event.set()
 signal.signal(signal.SIGTERM, receiveSignal)
@@ -104,7 +105,7 @@ def run_detector(
 while not stop_event.is_set():
     try:
-        connection_id = detection_queue.get(timeout=5)
+        connection_id = detection_queue.get(timeout=1)
     except queue.Empty:
         continue
     input_frame = frame_manager.get(
@@ -125,6 +126,8 @@ def run_detector(
     avg_speed.value = (avg_speed.value * 9 + duration) / 10
+logger.info("Exited detection process...")
 class ObjectDetectProcess:
 def __init__(
@@ -144,6 +147,9 @@ class ObjectDetectProcess:
 self.start_or_restart()
 def stop(self):
+    # if the process has already exited on its own, just return
+    if self.detect_process and self.detect_process.exitcode:
+        return
     self.detect_process.terminate()
     logging.info("Waiting for detection process to exit gracefully...")
     self.detect_process.join(timeout=30)
@@ -151,6 +157,7 @@ class ObjectDetectProcess:
logging.info("Detection process didnt exit. Force killing...") logging.info("Detection process didnt exit. Force killing...")
self.detect_process.kill() self.detect_process.kill()
self.detect_process.join() self.detect_process.join()
logging.info("Detection process has exited...")
def start_or_restart(self): def start_or_restart(self):
self.detection_start.value = 0.0 self.detection_start.value = 0.0
@@ -173,12 +180,13 @@ class ObjectDetectProcess:
 class RemoteObjectDetector:
-def __init__(self, name, labels, detection_queue, event, model_config):
+def __init__(self, name, labels, detection_queue, event, model_config, stop_event):
     self.labels = labels
     self.name = name
     self.fps = EventsPerSecond()
     self.detection_queue = detection_queue
     self.event = event
+    self.stop_event = stop_event
     self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
     self.np_shm = np.ndarray(
         (1, model_config.height, model_config.width, 3),
@@ -193,11 +201,14 @@ class RemoteObjectDetector:
 def detect(self, tensor_input, threshold=0.4):
     detections = []
+    if self.stop_event.is_set():
+        return detections
     # copy input to shared memory
     self.np_shm[:] = tensor_input[:]
     self.event.clear()
     self.detection_queue.put(self.name)
-    result = self.event.wait(timeout=10.0)
+    result = self.event.wait(timeout=5.0)
     # if it timed out
     if result is None:
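The `detect()` call above is one side of a shared-memory request/response handshake: the requester writes the tensor into a named `SharedMemory` block, enqueues its name, and waits (now at most 5s) for the detector process to set its event. A toy standalone sketch of that pattern (names and the doubling "inference" are illustrative):

```python
import multiprocessing as mp
import numpy as np
from multiprocessing import shared_memory

def toy_detector(q, event, shm_name):
    shm = shared_memory.SharedMemory(name=shm_name)
    q.get()  # wait for a requester to announce a ready frame
    arr = np.ndarray((4,), dtype=np.float32, buffer=shm.buf)
    arr *= 2  # stand-in for inference, written back in place
    event.set()  # signal the requester that results are ready
    shm.close()

if __name__ == "__main__":
    shm = shared_memory.SharedMemory(create=True, size=16, name="toy_frame")
    arr = np.ndarray((4,), dtype=np.float32, buffer=shm.buf)
    arr[:] = [1, 2, 3, 4]
    q, event = mp.Queue(), mp.Event()
    p = mp.Process(target=toy_detector, args=(q, event, "toy_frame"))
    p.start()
    event.clear()
    q.put("toy_frame")
    assert event.wait(timeout=5.0)  # mirrors detect()'s bounded wait
    print(arr)  # [2. 4. 6. 8.]
    p.join()
    shm.close()
    shm.unlink()
```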

View File

@@ -901,7 +901,7 @@ class TrackedObjectProcessor(threading.Thread):
     current_tracked_objects,
     motion_boxes,
     regions,
-) = self.tracked_objects_queue.get(True, 10)
+) = self.tracked_objects_queue.get(True, 1)
 except queue.Empty:
     continue

View File

@@ -109,14 +109,15 @@ class FFMpegConverter:
 class BroadcastThread(threading.Thread):
-def __init__(self, camera, converter, websocket_server):
+def __init__(self, camera, converter, websocket_server, stop_event):
     super(BroadcastThread, self).__init__()
     self.camera = camera
     self.converter = converter
     self.websocket_server = websocket_server
+    self.stop_event = stop_event
 def run(self):
-    while True:
+    while not self.stop_event.is_set():
         buf = self.converter.read(65536)
         if buf:
             manager = self.websocket_server.manager
@@ -426,7 +427,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
     cam_config.live.quality,
 )
 broadcasters[camera] = BroadcastThread(
-    camera, converters[camera], websocket_server
+    camera, converters[camera], websocket_server, stop_event
 )
 if config.birdseye.enabled:
@@ -439,7 +440,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
     config.birdseye.restream,
 )
 broadcasters["birdseye"] = BroadcastThread(
-    "birdseye", converters["birdseye"], websocket_server
+    "birdseye", converters["birdseye"], websocket_server, stop_event
 )
 websocket_thread.start()
@@ -463,7 +464,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
     current_tracked_objects,
     motion_boxes,
     regions,
-) = video_output_queue.get(True, 10)
+) = video_output_queue.get(True, 1)
 except queue.Empty:
     continue

View File

@@ -283,8 +283,10 @@ class StatsEmitter(threading.Thread):
 def run(self) -> None:
     time.sleep(10)
     while not self.stop_event.wait(self.config.mqtt.stats_interval):
+        logger.debug("Starting stats collection")
         stats = stats_snapshot(
             self.config, self.stats_tracking, self.hwaccel_errors
         )
         self.dispatcher.publish("stats", json.dumps(stats), retain=False)
+        logger.debug("Finished stats collection")
-    logger.info(f"Exiting watchdog...")
+    logger.info(f"Exiting stats emitter...")

View File

@@ -14,7 +14,7 @@ from abc import ABC, abstractmethod
 from collections import Counter
 from collections.abc import Mapping
 from multiprocessing import shared_memory
-from typing import Any, AnyStr, Tuple
+from typing import Any, AnyStr, Optional, Tuple
 import cv2
 import numpy as np
@@ -926,6 +926,17 @@ def get_nvidia_gpu_stats() -> dict[str, str]:
"--format=csv", "--format=csv",
] ]
if (
"CUDA_VISIBLE_DEVICES" in os.environ
and os.environ["CUDA_VISIBLE_DEVICES"].isdigit()
):
nvidia_smi_command.extend(["--id", os.environ["CUDA_VISIBLE_DEVICES"]])
elif (
"NVIDIA_VISIBLE_DEVICES" in os.environ
and os.environ["NVIDIA_VISIBLE_DEVICES"].isdigit()
):
nvidia_smi_command.extend(["--id", os.environ["NVIDIA_VISIBLE_DEVICES"]])
p = sp.run( p = sp.run(
nvidia_smi_command, nvidia_smi_command,
encoding="ascii", encoding="ascii",
@@ -965,9 +976,13 @@ def ffprobe_stream(path: str) -> sp.CompletedProcess:
 return sp.run(ffprobe_cmd, capture_output=True)
-def vainfo_hwaccel() -> sp.CompletedProcess:
+def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
     """Run vainfo."""
-    ffprobe_cmd = ["vainfo"]
+    ffprobe_cmd = (
+        ["vainfo"]
+        if not device_name
+        else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
+    )
     return sp.run(ffprobe_cmd, capture_output=True)
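The env-var guard above only pins a device when the value is a single numeric index, so settings like `NVIDIA_VISIBLE_DEVICES=all` keep querying every GPU. A standalone illustration with a stubbed environment (query flags abbreviated; the real command's full flag list is in the diff above):

```python
import os

nvidia_smi_command = ["nvidia-smi", "--format=csv"]  # abbreviated
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # pretend the container pins GPU 1
if os.environ.get("CUDA_VISIBLE_DEVICES", "").isdigit():
    nvidia_smi_command.extend(["--id", os.environ["CUDA_VISIBLE_DEVICES"]])
print(nvidia_smi_command)  # ['nvidia-smi', '--format=csv', '--id', '1']
```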

View File

@@ -160,6 +160,7 @@ def capture_frames(
     fps: mp.Value,
     skipped_fps: mp.Value,
     current_frame: mp.Value,
+    stop_event: mp.Event,
 ):
     frame_size = frame_shape[0] * frame_shape[1]
@@ -177,6 +178,9 @@ def capture_frames(
 try:
     frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
 except Exception as e:
+    # shutdown has been initiated
+    if stop_event.is_set():
+        break
     logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")
 if ffmpeg_process.poll() != None:
@@ -340,6 +344,7 @@ class CameraWatchdog(threading.Thread):
     self.frame_shape,
     self.frame_queue,
     self.camera_fps,
+    self.stop_event,
 )
 self.capture_thread.start()
@@ -368,13 +373,16 @@ class CameraWatchdog(threading.Thread):
 class CameraCapture(threading.Thread):
-def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
+def __init__(
+    self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
+):
     threading.Thread.__init__(self)
     self.name = f"capture:{camera_name}"
     self.camera_name = camera_name
     self.frame_shape = frame_shape
     self.frame_queue = frame_queue
     self.fps = fps
+    self.stop_event = stop_event
     self.skipped_fps = EventsPerSecond()
     self.frame_manager = SharedMemoryFrameManager()
     self.ffmpeg_process = ffmpeg_process
@@ -392,6 +400,7 @@ class CameraCapture(threading.Thread):
     self.fps,
     self.skipped_fps,
     self.current_frame,
+    self.stop_event,
 )
@@ -404,6 +413,9 @@ def capture_camera(name, config: CameraConfig, process_info):
 signal.signal(signal.SIGTERM, receiveSignal)
 signal.signal(signal.SIGINT, receiveSignal)
+threading.current_thread().name = f"capture:{name}"
+setproctitle(f"frigate.capture:{name}")
 frame_queue = process_info["frame_queue"]
 camera_watchdog = CameraWatchdog(
     name,
@@ -458,7 +470,7 @@ def track_camera(
     motion_contour_area,
 )
 object_detector = RemoteObjectDetector(
-    name, labelmap, detection_queue, result_connection, model_config
+    name, labelmap, detection_queue, result_connection, model_config, stop_event
 )
 object_tracker = ObjectTracker(config.detect)
@@ -598,7 +610,7 @@ def process_frames(
     break
 try:
-    frame_time = frame_queue.get(True, 10)
+    frame_time = frame_queue.get(True, 1)
 except queue.Empty:
     continue
@@ -784,6 +796,7 @@ def process_frames(
     refining = True
 else:
     selected_objects.append(obj)
+
 # set the detections list to only include top, complete objects
 # and new detections
 detections = selected_objects

View File

@@ -11,7 +11,15 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
 const [hasLoaded, setHasLoaded] = useState(false);
 const containerRef = useRef(null);
 const canvasRef = useRef(null);
-const [{ width: availableWidth }] = useResizeObserver(containerRef);
+const [{ width: containerWidth }] = useResizeObserver(containerRef);
+
+// Add scrollbar width (when visible) to the available observer width to eliminate screen juddering.
+// https://github.com/blakeblackshear/frigate/issues/1657
+let scrollBarWidth = 0;
+if (window.innerWidth && document.body.offsetWidth) {
+  scrollBarWidth = window.innerWidth - document.body.offsetWidth;
+}
+const availableWidth = scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth;
 const { name } = config ? config.cameras[camera] : '';
 const enabled = config ? config.cameras[camera].enabled : 'True';
@@ -22,7 +30,11 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
 const scaledHeight = Math.floor(availableWidth / aspectRatio);
 return stretch ? scaledHeight : Math.min(scaledHeight, height);
 }, [availableWidth, aspectRatio, height, stretch]);
-const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio), [scaledHeight, aspectRatio]);
+const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
+  scaledHeight,
+  aspectRatio,
+  scrollBarWidth,
+]);
 const img = useMemo(() => new Image(), []);
 img.onload = useCallback(

View File

@@ -4,6 +4,7 @@ import { useCallback, useEffect, useState } from 'preact/hooks';
 import ButtonsTabbed from '../components/ButtonsTabbed';
 import useSWR from 'swr';
 import Button from '../components/Button';
+import copy from 'copy-to-clipboard';
 export default function Logs() {
 const [logService, setLogService] = useState('frigate');
@@ -14,10 +15,7 @@ export default function Logs() {
 const { data: nginxLogs } = useSWR('logs/nginx');
 const handleCopyLogs = useCallback(() => {
-  async function copy() {
-    await window.navigator.clipboard.writeText(logs);
-  }
-  copy();
+  copy(logs);
 }, [logs]);
 useEffect(() => {

View File

@@ -187,8 +187,9 @@ export default function System() {
<div className="p-2"> <div className="p-2">
{gpu_usages[gpu]['gpu'] == -1 ? ( {gpu_usages[gpu]['gpu'] == -1 ? (
<div className="p-4"> <div className="p-4">
There was an error getting usage stats. Either your GPU does not support this or Frigate does There was an error getting usage stats. This does not mean hardware acceleration is not working.
not have proper access. Either your GPU does not support this or Frigate does not have proper access to get statistics.
This is expected for the Home Assistant addon.
</div> </div>
) : ( ) : (
<Table className="w-full"> <Table className="w-full">