Forked from Github/frigate

Compare commits: v0.10.1...v0.10.0-be (73 commits)
| SHA1 |
| ---------- |
| b1e84ca7fe |
| e6ec5cb097 |
| 23c70acd51 |
| 091648187f |
| 2b7d38f947 |
| f801930588 |
| 955c2779d9 |
| 037f8667a6 |
| 307068a61f |
| 077d900b44 |
| 92f9195075 |
| 82c60093d1 |
| 944b9181e0 |
| 326b368e82 |
| 040d8c9778 |
| 273f803c7c |
| bd8e23833c |
| 9edf38347c |
| 1569ce7cf6 |
| db1255aa7f |
| 609b436ed8 |
| 95bdf9fe34 |
| 251d29aa38 |
| 156e1a4dc2 |
| a5c13e7455 |
| fcb4aaef0d |
| 589432bc89 |
| b19a02888a |
| 18fd50dfce |
| df0246aed8 |
| cbb2882123 |
| 9f18629df3 |
| 63f8034e46 |
| f3efc0667f |
| af001321a8 |
| 92e08b92f5 |
| 26241b0877 |
| c1155af169 |
| 77c1f1bb1b |
| ae3c01fe2d |
| 7a2a85d253 |
| 77c66d4e49 |
| 494e5ac4ec |
| 63b7465452 |
| e6d2df5661 |
| a3301e0347 |
| 3d556cc2cb |
| 585efe1a0f |
| c7d47439dd |
| 19a6978228 |
| 1ebb8a54bf |
| ae968044d6 |
| b912851e49 |
| 14c74e4361 |
| 51fb532e1a |
| 3541f966e3 |
| c7faef8faa |
| cdd3000315 |
| 1c1c28d0e5 |
| 4422e86907 |
| 8f43a2d109 |
| bd7755fdd3 |
| d554175631 |
| ff667b019a |
| 57dcb29f8b |
| 9dc6c423b7 |
| 58117e2a3e |
| 5bec438f9c |
| 24cc63d6d3 |
| d17bd74c9a |
| 8f101ccca8 |
| b63c56d810 |
| 61c62d4685 |
Makefile (2 changes)
```diff
@@ -3,7 +3,7 @@ default_target: amd64_frigate
 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)

 version:
-	echo "VERSION='0.10.1-$(COMMIT_HASH)'" > frigate/version.py
+	echo "VERSION='0.10.0-$(COMMIT_HASH)'" > frigate/version.py

 web:
 	docker build --tag frigate-web --file docker/Dockerfile.web web/
```
```diff
@@ -22,5 +22,3 @@ RUN pip3 install pylint black
 # Install Node 14
 RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - \
     && apt-get install -y nodejs
-
-RUN npm install -g npm@latest
```
````diff
@@ -43,11 +43,6 @@ If you are storing your database on a network share (SMB, NFS, etc), you may get

-This may need to be in a custom location if network storage is used for the media folder.
-
-```yaml
-database:
-  path: /path/to/frigate.db
-```

 ### `model`

 If using a custom model, the width and height will need to be specified.
````
````diff
@@ -19,34 +19,6 @@ output_args:
   rtmp: -c:v libx264 -an -f flv
 ```

-### JPEG Stream Cameras
-
-Cameras using a live changing jpeg image will need input parameters as below
-
-```yaml
-input_args:
-  - -r
-  - 5 # << enter FPS here
-  - -stream_loop
-  - -1
-  - -f
-  - image2
-  - -avoid_negative_ts
-  - make_zero
-  - -fflags
-  - nobuffer
-  - -flags
-  - low_delay
-  - -strict
-  - experimental
-  - -fflags
-  - +genpts+discardcorrupt
-  - -use_wallclock_as_timestamps
-  - 1
-```
-
-Outputting the stream will have the same args and caveats as per [MJPEG Cameras](#mjpeg-cameras)
-
 ### RTMP Cameras

 The input parameters need to be adjusted for RTMP cameras
````
````diff
@@ -89,8 +61,8 @@ cameras:
           roles:
             - detect
     detect:
-      width: 896
-      height: 672
+      width: 640
+      height: 480
       fps: 7
 ```
````
```diff
@@ -159,26 +159,9 @@ detect:
   enabled: True
   # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate)
   max_disappeared: 25
-  # Optional: Configuration for stationary object tracking
-  stationary:
-    # Optional: Frequency for running detection on stationary objects (default: shown below)
-    # When set to 0, object detection will never be run on stationary objects. If set to 10, it will be run on every 10th frame.
-    interval: 0
-    # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s)
-    threshold: 50
-    # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever)
-    # This can help with false positives for objects that should only be stationary for a limited amount of time.
-    # It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave
-    # car at the default.
-    # WARNING: Setting these values overrides default behavior and disables stationary object tracking.
-    #          There are very few situations where you would want it disabled. It is NOT recommended to
-    #          copy these values from the example config into your config unless you know they are needed.
-    max_frames:
-      # Optional: Default for all object types (default: not set, track forever)
-      default: 3000
-      # Optional: Object specific values
-      objects:
-        person: 1000
+  # Optional: Frequency for running detection on stationary objects (default: 0)
+  # When set to 0, object detection will never be run on stationary objects. If set to 10, it will be run on every 10th frame.
+  stationary_interval: 0

 # Optional: Object configuration
 # NOTE: Can be overridden at the camera level
```
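The stationary options removed in this hunk interact in a way that is easy to misread. As a rough sketch of the bookkeeping the comments above describe (illustrative helper names, not Frigate's internal API):

```python
def should_run_detection(motionless_count: int, interval: int) -> bool:
    # interval == 0 disables periodic detection on stationary objects;
    # otherwise every Nth motionless frame is re-checked.
    return interval != 0 and motionless_count % interval == 0


def is_stationary(motionless_count: int, threshold: int) -> bool:
    # an object counts as stationary after `threshold` frames without a
    # position change (default: 10x the frame rate, i.e. about 10 seconds)
    return motionless_count >= threshold
```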
```diff
@@ -236,20 +219,11 @@ motion:
   # Optional: motion mask
   # NOTE: see docs for more detailed info on creating masks
   mask: 0,900,1080,900,1080,1920,0,1920
-  # Optional: improve contrast (default: shown below)
-  # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive
-  # for daytime.
-  improve_contrast: False

 # Optional: Record configuration
 # NOTE: Can be overridden at the camera level
 record:
   # Optional: Enable recording (default: shown below)
-  # WARNING: Frigate does not currently support limiting recordings based
-  #          on available disk space automatically. If using recordings,
-  #          you must specify retention settings for a number of days that
-  #          will fit within the available disk space of your drive or Frigate
-  #          will crash.
   enabled: False
   # Optional: Number of minutes to wait between cleanup runs (default: shown below)
   # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
```
```diff
@@ -407,7 +381,7 @@ cameras:
     # camera.
     front_steps:
       # Required: List of x,y coordinates to define the polygon of the zone.
-      # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
       # NOTE: Coordinates can be generated at https://www.image-map.net/
       coordinates: 545,1077,747,939,788,805
       # Optional: List of objects that can trigger this zone (default: all tracked objects)
       objects:
```
````diff
@@ -97,3 +97,15 @@ processes:
 |    0   N/A  N/A     12827      C   ffmpeg                            417MiB |
 +-----------------------------------------------------------------------------+
 ```
+
+To further improve performance, you can set ffmpeg to skip frames in the output,
+using the fps filter:
+
+```yaml
+output_args:
+  - -filter:v
+  - fps=fps=5
+```
+
+This setting, for example, allows Frigate to consume my 10-15fps camera streams on
+my relatively low powered Haswell machine with relatively low cpu usage.
````
```diff
@@ -5,11 +5,7 @@ title: Objects

 import labels from "../../../labelmap.txt";

-Frigate includes the object models listed below from the Google Coral test data.
-
-Please note:
-- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.
-- `person` is the only tracked object by default. See the [full configuration reference](https://docs.frigate.video/configuration/index#full-configuration-reference) for an example of expanding the list of tracked objects.
+By default, Frigate includes the following object models from the Google Coral test data. Note that `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.

 <ul>
   {labels.split("\n").map((label) => (
```
```diff
@@ -21,18 +21,4 @@ record:

 This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.

-When `retain -> days` is set to `0`, segments will be deleted from the cache if no events are in progress.
-
-## What do the different retain modes mean?
-
-Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for 24/7 recording (but can also affect events).
-
-Let's say you have frigate configured so that your doorbell camera would retain the last **2** days of 24/7 recording.
-
-- With the `all` option all 48 hours of those two days would be kept and viewable.
-- With the `motion` option the only parts of those 48 hours would be segments that frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
-- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary.
-
-The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later.
-
-- With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage.
-- With the `motion` option all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, person walked by, lighting changed, etc. would be saved.
-- With the `active_objects` it would only keep segments where the object was active. In this case the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away. Essentially reducing the 4 hours to a minute or two of event footage.
+When `retain_days` is set to `0`, segments will be deleted from the cache if no events are in progress.
```
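As a mental model of the three retain modes described in the removed section, the per-segment keep decision can be sketched like this (a hypothetical helper, not Frigate's code):

```python
def keep_segment(mode: str, has_motion: bool, has_active_object: bool) -> bool:
    if mode == "all":
        return True
    if mode == "motion":
        return has_motion
    if mode == "active_objects":
        # "active" means a true positive object that is not considered stationary
        return has_active_object
    raise ValueError(f"unknown retain mode: {mode}")
```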
```diff
@@ -5,4 +5,4 @@ title: RTMP

 Frigate can re-stream your video feed as a RTMP feed for other applications such as Home Assistant to utilize it at `rtmp://<frigate_host>/live/<camera_name>`. Port 1935 must be open. This allows you to use a video feed for detection in frigate and Home Assistant live view at the same time without having to make two separate connections to the camera. The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

-Some video feeds are not compatible with RTMP. If you are experiencing issues, check to make sure your camera feed is h264 with AAC audio. If your camera doesn't support a compatible format for RTMP, you can use the ffmpeg args to re-encode it on the fly at the expense of increased CPU utilization. Some more information about it can be found [here](../faqs#audio-in-recordings).
+Some video feeds are not compatible with RTMP. If you are experiencing issues, check to make sure your camera feed is h264 with AAC audio. If your camera doesn't support a compatible format for RTMP, you can use the ffmpeg args to re-encode it on the fly at the expense of increased CPU utilization.
```
```diff
@@ -3,9 +3,7 @@ id: zones
 title: Zones
 ---

-Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone.
-
-Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera.
+Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera.

 During testing, enable the Zones option for the debug feed so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.
```
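The bottom-center rule removed from the zones page can be illustrated with OpenCV's point-in-polygon test. This is a simplified sketch of the idea, reusing the example zone coordinates from this page, not a verbatim excerpt from Frigate:

```python
import cv2
import numpy as np

# Example zone polygon from the config reference above
zone_contour = np.array([[545, 1077], [747, 939], [788, 805]], dtype=np.int32)

def object_in_zone(box) -> bool:
    xmin, ymin, xmax, ymax = box
    # presence is judged by the bottom center of the bounding box,
    # not by how much of the box overlaps the zone
    bottom_center = ((xmin + xmax) // 2, ymax)
    # >= 0 means the point is inside the polygon or on its edge
    return cv2.pointPolygonTest(zone_contour, bottom_center, False) >= 0

print(object_in_zone((700, 800, 760, 950)))  # True for this example polygon
```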
````diff
@@ -11,24 +11,9 @@ This error message is due to a shm-size that is too small. Try updating your shm

 A solid green image means that frigate has not received any frames from ffmpeg. Check the logs to see why ffmpeg is exiting and adjust your ffmpeg args accordingly.

-### How can I get sound or audio in my recordings? {#audio-in-recordings}
+### How can I get sound or audio in my recordings?

-By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/configuration/index/#full-configuration-reference).
-
-:::tip
-
-When using `-c:a aac`, do not forget to replace `-c copy` with `-c:v copy`. Example:
-
-```diff title="frigate.yml"
-ffmpeg:
-  output_args:
--    record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an
-+    record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
-```
-
-This is needed because the `-c` flag (without `:a` or `:v`) applies for both audio and video, thus making it conflicting with `-c:a aac`.
-
-:::
+By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](configuration/index#full-configuration-reference).

 ### My mjpeg stream or snapshots look green and crazy
````
```diff
@@ -62,8 +62,6 @@ cameras:
           roles:
             - detect
             - rtmp
-    rtmp:
-      enabled: False # <-- RTMP should be disabled if your stream is not H264
     detect:
       width: 1280 # <---- update for your camera's resolution
       height: 720 # <---- update for your camera's resolution
```
```diff
@@ -73,9 +71,7 @@ cameras:

 At this point you should be able to start Frigate and see the the video feed in the UI.

-If you get a green image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with H264 RTSP cameras that support TCP connections. If you do not have H264 cameras, make sure you have disabled RTMP. It is possible to enable it, but you must tell ffmpeg to re-encode the video with customized output args.
-
-FFmpeg arguments for other types of cameras can be found [here](/configuration/camera_specific).
+If you get a green image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with RTSP cameras that support TCP connections. FFmpeg arguments for other types of cameras can be found [here](/configuration/camera_specific).

 ### Step 5: Configure hardware acceleration (optional)
```
````diff
@@ -167,17 +163,13 @@ cameras:
           roles:
             - detect
             - rtmp
       - path: rtsp://10.0.10.10:554/high_res_stream # <----- Add high res stream
         roles:
-          - record
+          - record # <----- Add role
     detect: ...
-    record: # <----- Enable recording
-      enabled: True
     motion: ...
 ```

 If you don't have separate streams for detect and record, you would just add the record role to the list on the first input.

-By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](/configuration/index#full-configuration-reference).
-
 ### Step 8: Enable snapshots (optional)
````
````diff
@@ -25,30 +25,6 @@ automation:
       when: '{{trigger.payload_json["after"]["start_time"]|int}}'
 ```

-Note that iOS devices support live previews of cameras by adding a camera entity id to the message data.
-
-```yaml
-automation:
-  - alias: Security_Frigate_Notifications
-    description: ""
-    trigger:
-      - platform: mqtt
-        topic: frigate/events
-        payload: new
-        value_template: "{{ value_json.type }}"
-    action:
-      - service: notify.mobile_app_iphone
-        data:
-          message: 'A {{trigger.payload_json["after"]["label"]}} was detected.'
-          data:
-            image: >-
-              https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg
-            tag: '{{trigger.payload_json["after"]["id"]}}'
-            when: '{{trigger.payload_json["after"]["start_time"]|int}}'
-            entity_id: camera.{{trigger.payload_json["after"]["camera"]}}
-    mode: single
-```
-
 ## Conditions

 Conditions with the `before` and `after` values allow a high degree of customization for automations.
````
```diff
@@ -21,17 +21,19 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o

 ## Server

-My current favorite is the Minisforum GK41 because of the dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
+My current favorite is the Odyssey X86 Blue J4125 because the Coral M.2 compatibility and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.

-| Name | Inference Speed | Coral Compatibility | Notes |
-| ---- | --------------- | ------------------- | ----- |
-| <a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Odyssey X86 Blue J4125</a> (affiliate link) | 9-10ms | M.2 B+M | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
-| <a href="https://amzn.to/3ptnb8D" target="_blank" rel="nofollow noopener sponsored">Minisforum GK41</a> (affiliate link) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
-| <a href="https://amzn.to/35E79BC" target="_blank" rel="nofollow noopener sponsored">Beelink GK55</a> (affiliate link) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
-| <a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Intel NUC</a> (affiliate link) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
-| <a href="https://amzn.to/3a6TBh8" target="_blank" rel="nofollow noopener sponsored">BMAX B2 Plus</a> (affiliate link) | 10-12ms | USB | Good balance of performance and cost. Also capable of running many other services at the same time as frigate. |
-| <a href="https://amzn.to/2YjpY9m" target="_blank" rel="nofollow noopener sponsored">Atomic Pi</a> (affiliate link) | 16ms | USB | Good option for a dedicated low power board with a small number of cameras. Can leverage Intel QuickSync for stream decoding. |
-| <a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 4 (64bit)</a> (affiliate link) | 10-15ms | USB | Can handle a small number of cameras. |
+| Name | Inference Speed | Coral Compatibility | Notes |
+| ---- | --------------- | ------------------- | ----- |
+| <a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Odyssey X86 Blue J4125</a> (affiliate link) | 9-10ms | M.2 B+M | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3oxEC8m" target="_blank" rel="nofollow noopener sponsored">Minisforum GK41</a> (affiliate link) | 9-10ms | USB | Great alternative to a NUC. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3ixJFlb" target="_blank" rel="nofollow noopener sponsored">Minisforum GK50</a> (affiliate link) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
+| <a href="https://amzn.to/3l7vCEI" target="_blank" rel="nofollow noopener sponsored">Intel NUC</a> (affiliate link) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. |
+| <a href="https://amzn.to/3a6TBh8" target="_blank" rel="nofollow noopener sponsored">BMAX B2 Plus</a> (affiliate link) | 10-12ms | USB | Good balance of performance and cost. Also capable of running many other services at the same time as frigate. |
+| <a href="https://amzn.to/2YjpY9m" target="_blank" rel="nofollow noopener sponsored">Atomic Pi</a> (affiliate link) | 16ms | USB | Good option for a dedicated low power board with a small number of cameras. Can leverage Intel QuickSync for stream decoding. |
+| <a href="https://amzn.to/2WIpwRU" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 3B (32bit)</a> (affiliate link) | 60ms | USB | Can handle a small number of cameras, but the detection speeds are slow due to USB 2.0. |
+| <a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 4 (32bit)</a> (affiliate link) | 15-20ms | USB | Can handle a small number of cameras. The 2GB version runs fine. |
+| <a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Raspberry Pi 4 (64bit)</a> (affiliate link) | 10-15ms | USB | Can handle a small number of cameras. The 2GB version runs fine. |

 ## Google Coral TPU
```
```diff
@@ -21,12 +21,6 @@ Windows is not officially supported, but some users have had success getting it

 Frigate uses the following locations for read/write operations in the container. Docker volume mappings can be used to map these to any location on your host machine.

-:::caution
-
-Note that Frigate does not currently support limiting recordings based on available disk space automatically. If using recordings, you must specify retention settings for a number of days that will fit within the available disk space of your drive or Frigate will crash.
-
-:::
-
 - `/media/frigate/clips`: Used for snapshot storage. In the future, it will likely be renamed from `clips` to `snapshots`. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
 - `/media/frigate/recordings`: Internal system storage for recording segments. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
 - `/media/frigate/frigate.db`: Default location for the sqlite database. You will also see several files alongside this file while frigate is running. If moving the database location (often needed when using a network drive at `/media/frigate`), it is recommended to mount a volume with docker at `/db` and change the storage location of the database to `/db/frigate.db` in the config file.
```
```diff
@@ -124,7 +118,6 @@ services:
     shm_size: "64mb" # update for your cameras based on calculation above
     devices:
       - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
-      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
       - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
     volumes:
       - /etc/localtime:/etc/localtime:ro
```
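The compose snippet above refers to a shm-size "calculation above" that is not part of this diff. As a sketch, the formula the Frigate docs gave for this era works out as follows; treat the constants as documentation values rather than an API:

```python
def shm_size_mb(width: int, height: int, cameras: int = 1) -> float:
    # per-camera frame buffer: width * height * 1.5 bytes (YUV420) * 9 frames,
    # plus a fixed overhead, converted from bytes to MB
    bytes_per_camera = width * height * 1.5 * 9 + 270480
    return round(cameras * bytes_per_camera / 1048576, 2)

print(shm_size_mb(1280, 720))  # ~12 MB for one 720p detect stream
```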
```diff
@@ -184,15 +177,6 @@ HassOS users can install via the addon repository.
 6. Start the addon container
 7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode"

-There are several versions of the addon available:
-
-| Addon Version                  | Description                                                |
-| ------------------------------ | ---------------------------------------------------------- |
-| Frigate NVR                    | Current release with protection mode on                    |
-| Frigate NVR (Full Access)      | Current release with the option to disable protection mode |
-| Frigate NVR Beta               | Beta release with protection mode on                       |
-| Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode    |
-
 ## Home Assistant Supervised

 :::tip
```
```diff
@@ -45,14 +45,11 @@ that card.

 ## Configuration

-When configuring the integration, you will be asked for the `URL` of your frigate instance which is the URL you use to access Frigate in the browser. This may look like `http://<host>:5000/`. If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access frigate directly from your network.
+When configuring the integration, you will be asked for the following parameters:

-| Addon Version                  | URL                                    |
-| ------------------------------ | -------------------------------------- |
-| Frigate NVR                    | `http://ccab4aaf-frigate:5000`         |
-| Frigate NVR (Full Access)      | `http://ccab4aaf-frigate-fa:5000`      |
-| Frigate NVR Beta               | `http://ccab4aaf-frigate-beta:5000`    |
-| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
+| Variable | Description |
+| -------- | ----------- |
+| URL      | The `URL` of your frigate instance, the URL you use to access Frigate in the browser. This may look like `http://<host>:5000/`. If you are using HassOS with the addon, the URL should be `http://ccab4aaf-frigate:5000` (or `http://ccab4aaf-frigate-beta:5000` if your are using the beta version of the addon). Live streams required port 1935, see [RTMP streams](#streams) |

 <a name="options"></a>
```
```diff
@@ -55,10 +55,7 @@ Message published for each changed event. The first message is published when th
     "entered_zones": ["yard", "driveway"],
     "thumbnail": null,
     "has_snapshot": false,
-    "has_clip": false,
-    "stationary": false, // whether or not the object is considered stationary
-    "motionless_count": 0, // number of frames the object has been motionless
-    "position_changes": 2 // number of times the object has moved from a stationary position
+    "has_clip": false
   },
   "after": {
     "id": "1607123955.475377-mxklsc",
```
````diff
@@ -78,10 +75,7 @@ Message published for each changed event. The first message is published when th
     "entered_zones": ["yard", "driveway"],
     "thumbnail": null,
     "has_snapshot": false,
-    "has_clip": false,
-    "stationary": false, // whether or not the object is considered stationary
-    "motionless_count": 0, // number of frames the object has been motionless
-    "position_changes": 2 // number of times the object has changed position
+    "has_clip": false
   }
 }
 ```
````
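To make the payload shape above concrete, here is a minimal sketch of a consumer for the `frigate/events` topic using paho-mqtt. The broker address is an assumption for illustration; this is not part of Frigate itself:

```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, message):
    # each message carries a "before" and "after" snapshot of the event
    payload = json.loads(message.payload)
    after = payload["after"]
    print(after["id"], after["label"], after["entered_zones"])


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # assumed broker location
client.subscribe("frigate/events")
client.loop_forever()
```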
docs/package-lock.json (generated, 14859 changes)
File diff suppressed because it is too large.
```diff
@@ -12,13 +12,13 @@
     "clear": "docusaurus clear"
   },
   "dependencies": {
-    "@docusaurus/core": "^2.0.0-beta.15",
-    "@docusaurus/preset-classic": "^2.0.0-beta.15",
-    "@mdx-js/react": "^1.6.22",
+    "@docusaurus/core": "^2.0.0-beta.6",
+    "@docusaurus/preset-classic": "^2.0.0-beta.6",
+    "@mdx-js/react": "^1.6.21",
     "clsx": "^1.1.1",
     "raw-loader": "^4.0.2",
-    "react": "^16.14.0",
-    "react-dom": "^16.14.0"
+    "react": "^16.8.4",
+    "react-dom": "^16.8.4"
   },
   "browserslist": {
     "production": [
```
```diff
@@ -31,8 +31,5 @@
       "last 1 firefox version",
       "last 1 safari version"
     ]
-  },
-  "devDependencies": {
-    "@types/react": "^16.14.0"
-  }
+  }
 }
```
```diff
@@ -8,7 +8,6 @@ import threading
 from logging.handlers import QueueHandler
 from typing import Dict, List

-import traceback
 import yaml
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase
```
```diff
@@ -321,7 +320,6 @@ class FrigateApp:
             print("*** Config Validation Errors ***")
             print("*************************************************************")
             print(e)
-            print(traceback.format_exc())
             print("*************************************************************")
             print("*** End Config Validation Errors ***")
             print("*************************************************************")
```
```diff
@@ -122,7 +122,6 @@ class MotionConfig(FrigateBaseModel):
         ge=1,
         le=255,
     )
-    improve_contrast: bool = Field(default=False, title="Improve Contrast")
     contour_area: Optional[int] = Field(default=30, title="Contour Area")
     delta_alpha: float = Field(default=0.2, title="Delta Alpha")
     frame_alpha: float = Field(default=0.2, title="Frame Alpha")
```
```diff
@@ -163,29 +162,6 @@ class RuntimeMotionConfig(MotionConfig):
         extra = Extra.ignore


-class StationaryMaxFramesConfig(FrigateBaseModel):
-    default: Optional[int] = Field(title="Default max frames.", ge=1)
-    objects: Dict[str, int] = Field(
-        default_factory=dict, title="Object specific max frames."
-    )
-
-
-class StationaryConfig(FrigateBaseModel):
-    interval: Optional[int] = Field(
-        default=0,
-        title="Frame interval for checking stationary objects.",
-        ge=0,
-    )
-    threshold: Optional[int] = Field(
-        title="Number of frames without a position change for an object to be considered stationary",
-        ge=1,
-    )
-    max_frames: StationaryMaxFramesConfig = Field(
-        default_factory=StationaryMaxFramesConfig,
-        title="Max frames for stationary objects.",
-    )
-
-
 class DetectConfig(FrigateBaseModel):
     height: int = Field(default=720, title="Height of the stream for the detect role.")
     width: int = Field(default=1280, title="Width of the stream for the detect role.")
```
```diff
@@ -196,9 +172,10 @@ class DetectConfig(FrigateBaseModel):
     max_disappeared: Optional[int] = Field(
         title="Maximum number of frames the object can dissapear before detection ends."
     )
-    stationary: StationaryConfig = Field(
-        default_factory=StationaryConfig,
-        title="Stationary objects config.",
+    stationary_interval: Optional[int] = Field(
+        default=0,
+        title="Frame interval for checking stationary objects.",
+        ge=0,
     )
```
```diff
@@ -789,11 +766,6 @@ class FrigateConfig(FrigateBaseModel):
             if camera_config.detect.max_disappeared is None:
                 camera_config.detect.max_disappeared = max_disappeared

-            # Default stationary_threshold configuration
-            stationary_threshold = camera_config.detect.fps * 10
-            if camera_config.detect.stationary.threshold is None:
-                camera_config.detect.stationary.threshold = stationary_threshold
-
             # FFMPEG input substitution
             for input in camera_config.ffmpeg.inputs:
                 input.path = input.path.format(**FRIGATE_ENV_VARS)
```
```diff
@@ -864,18 +836,14 @@
                 camera_config.record.retain.days = camera_config.record.retain_days

             # warning if the higher level record mode is potentially more restrictive than the events
-            rank_map = {
-                RetainModeEnum.all: 0,
-                RetainModeEnum.motion: 1,
-                RetainModeEnum.active_objects: 2,
-            }
             if (
                 camera_config.record.retain.days != 0
-                and rank_map[camera_config.record.retain.mode]
-                > rank_map[camera_config.record.events.retain.mode]
+                and camera_config.record.retain.mode != RetainModeEnum.all
+                and camera_config.record.events.retain.mode
+                != camera_config.record.retain.mode
             ):
                 logger.warning(
-                    f"{name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
+                    f"Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
                 )
             # generage the ffmpeg commands
             camera_config.create_ffmpeg_cmds()
```
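The two warning conditions in this hunk differ subtly. An illustrative comparison, simplified to plain strings rather than `RetainModeEnum` and without the `days != 0` check, shows a case where they disagree:

```python
rank_map = {"all": 0, "motion": 1, "active_objects": 2}

def warn_ranked(record_mode: str, events_mode: str) -> bool:
    # removed (v0.10.1) side: warn only when record retention is strictly
    # more restrictive than event retention
    return rank_map[record_mode] > rank_map[events_mode]

def warn_unequal(record_mode: str, events_mode: str) -> bool:
    # added (v0.10.0-be) side: warn on any mismatch once record mode isn't "all"
    return record_mode != "all" and record_mode != events_mode

print(warn_ranked("motion", "active_objects"))   # False: events already stricter
print(warn_unequal("motion", "active_objects"))  # True
```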
```diff
@@ -15,16 +15,6 @@ from frigate.models import Event
 logger = logging.getLogger(__name__)


-def should_update_db(prev_event, current_event):
-    return (
-        prev_event["top_score"] != current_event["top_score"]
-        or prev_event["entered_zones"] != current_event["entered_zones"]
-        or prev_event["thumbnail"] != current_event["thumbnail"]
-        or prev_event["has_clip"] != current_event["has_clip"]
-        or prev_event["has_snapshot"] != current_event["has_snapshot"]
-    )
-
-
 class EventProcessor(threading.Thread):
     def __init__(
         self, config, camera_processes, event_queue, event_processed_queue, stop_event
```
```diff
@@ -58,9 +48,7 @@ class EventProcessor(threading.Thread):
             if event_type == "start":
                 self.events_in_process[event_data["id"]] = event_data

-            elif event_type == "update" and should_update_db(
-                self.events_in_process[event_data["id"]], event_data
-            ):
+            elif event_type == "update":
                 self.events_in_process[event_data["id"]] = event_data
                 # TODO: this will generate a lot of db activity possibly
                 if event_data["has_clip"] or event_data["has_snapshot"]:
```
```diff
@@ -249,10 +249,7 @@ def event_clip(id):
     clip_path = os.path.join(CLIPS_DIR, file_name)

     if not os.path.isfile(clip_path):
-        end_ts = (
-            datetime.now().timestamp() if event.end_time is None else event.end_time
-        )
-        return recording_clip(event.camera, event.start_time, end_ts)
+        return recording_clip(event.camera, event.start_time, event.end_time)

     response = make_response()
     response.headers["Content-Description"] = "File Transfer"
```
```diff
@@ -367,13 +364,7 @@ def best(camera_name, label):
             box_size = 300
             box = best_object.get("box", (0, 0, box_size, box_size))
             region = calculate_region(
-                best_frame.shape,
-                box[0],
-                box[1],
-                box[2],
-                box[3],
-                box_size,
-                multiplier=1.1,
+                best_frame.shape, box[0], box[1], box[2], box[3], box_size, multiplier=1.1
             )
             best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
```
```diff
@@ -527,17 +518,12 @@ def recordings(camera_name):
                 FROM C2
                 WHERE cnt = 0
             )
-            SELECT id, label, camera, top_score, start_time, end_time
-            FROM event
-            WHERE camera = ? AND end_time IS NULL
-            UNION ALL
             SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time
             FROM C3
             GROUP BY label, grpnum
             ORDER BY start_time;""",
-        camera_name,
         camera_name,
         camera_name,
     )

     event: Event
```
```diff
@@ -725,15 +711,7 @@ def vod_event(id):
         end_ts = (
             datetime.now().timestamp() if event.end_time is None else event.end_time
         )
-        vod_response = vod_ts(event.camera, event.start_time, end_ts)
-        # If the recordings are not found, set has_clip to false
-        if (
-            type(vod_response) == tuple
-            and len(vod_response) == 2
-            and vod_response[1] == 404
-        ):
-            Event.update(has_clip=False).where(Event.id == id).execute()
-        return vod_response
+        return vod_ts(event.camera, event.start_time, end_ts)

     duration = int((event.end_time - event.start_time) * 1000)
     return jsonify(
```
```diff
@@ -38,15 +38,14 @@ class MotionDetector:
         )

         # Improve contrast
-        if self.config.improve_contrast:
-            minval = np.percentile(resized_frame, 4)
-            maxval = np.percentile(resized_frame, 96)
-            # don't adjust if the image is a single color
-            if minval < maxval:
-                resized_frame = np.clip(resized_frame, minval, maxval)
-                resized_frame = (
-                    ((resized_frame - minval) / (maxval - minval)) * 255
-                ).astype(np.uint8)
+        minval = np.percentile(resized_frame, 4)
+        maxval = np.percentile(resized_frame, 96)
+        # don't adjust if the image is a single color
+        if minval < maxval:
+            resized_frame = np.clip(resized_frame, minval, maxval)
+            resized_frame = (
+                ((resized_frame - minval) / (maxval - minval)) * 255
+            ).astype(np.uint8)

         # mask frame
         resized_frame[self.mask] = [255]
```
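The percentile stretch in this hunk is identical on both sides; only the `improve_contrast` gate differs. A standalone demonstration of the math on a synthetic low-contrast frame:

```python
import numpy as np

# simulate a dim night frame whose pixel values span a narrow band
frame = np.random.randint(60, 90, (360, 640), dtype=np.uint8)

minval = np.percentile(frame, 4)
maxval = np.percentile(frame, 96)
if minval < maxval:  # skip single-color frames to avoid divide by zero
    stretched = np.clip(frame, minval, maxval)
    stretched = (((stretched - minval) / (maxval - minval)) * 255).astype(np.uint8)
    # the narrow 60-90 band is stretched across the full 0-255 range
    print(frame.min(), frame.max(), "->", stretched.min(), stretched.max())
```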
```diff
@@ -101,13 +101,14 @@ class TrackedObject:
         return median(scores)

     def update(self, current_frame_time, obj_data):
-        thumb_update = False
-        significant_change = False
+        significant_update = False
+        zone_change = False
+        self.obj_data.update(obj_data)
         # if the object is not in the current frame, add a 0.0 to the score history
-        if obj_data["frame_time"] != current_frame_time:
+        if self.obj_data["frame_time"] != current_frame_time:
             self.score_history.append(0.0)
         else:
-            self.score_history.append(obj_data["score"])
+            self.score_history.append(self.obj_data["score"])
         # only keep the last 10 scores
         if len(self.score_history) > 10:
             self.score_history = self.score_history[-10:]
```
```diff
@@ -121,24 +122,24 @@
         if not self.false_positive:
             # determine if this frame is a better thumbnail
             if self.thumbnail_data is None or is_better_thumbnail(
-                self.thumbnail_data, obj_data, self.camera_config.frame_shape
+                self.thumbnail_data, self.obj_data, self.camera_config.frame_shape
             ):
                 self.thumbnail_data = {
-                    "frame_time": obj_data["frame_time"],
-                    "box": obj_data["box"],
-                    "area": obj_data["area"],
-                    "region": obj_data["region"],
-                    "score": obj_data["score"],
+                    "frame_time": self.obj_data["frame_time"],
+                    "box": self.obj_data["box"],
+                    "area": self.obj_data["area"],
+                    "region": self.obj_data["region"],
+                    "score": self.obj_data["score"],
                 }
-                thumb_update = True
+                significant_update = True

         # check zones
         current_zones = []
-        bottom_center = (obj_data["centroid"][0], obj_data["box"][3])
+        bottom_center = (self.obj_data["centroid"][0], self.obj_data["box"][3])
         # check each zone
         for name, zone in self.camera_config.zones.items():
             # if the zone is not for this object type, skip
-            if len(zone.objects) > 0 and not obj_data["label"] in zone.objects:
+            if len(zone.objects) > 0 and not self.obj_data["label"] in zone.objects:
                 continue
             contour = zone.contour
             # check if the object is in the zone
```
```diff
@@ -149,29 +150,12 @@
             if name not in self.entered_zones:
                 self.entered_zones.append(name)

-        if not self.false_positive:
-            # if the zones changed, signal an update
-            if set(self.current_zones) != set(current_zones):
-                significant_change = True
+        # if the zones changed, signal an update
+        if not self.false_positive and set(self.current_zones) != set(current_zones):
+            zone_change = True

-            # if the position changed, signal an update
-            if self.obj_data["position_changes"] != obj_data["position_changes"]:
-                significant_change = True
-
-            # if the motionless_count reaches the stationary threshold
-            if (
-                self.obj_data["motionless_count"]
-                == self.camera_config.detect.stationary.threshold
-            ):
-                significant_change = True
-
-            # update at least once per minute
-            if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
-                significant_change = True
-
-        self.obj_data.update(obj_data)
         self.current_zones = current_zones
-        return (thumb_update, significant_change)
+        return (significant_update, zone_change)
```
```diff
@@ -193,8 +177,6 @@
             "box": self.obj_data["box"],
             "area": self.obj_data["area"],
             "region": self.obj_data["region"],
-            "stationary": self.obj_data["motionless_count"]
-            > self.camera_config.detect.stationary.threshold,
             "motionless_count": self.obj_data["motionless_count"],
             "position_changes": self.obj_data["position_changes"],
             "current_zones": self.current_zones.copy(),
```
```diff
@@ -484,11 +466,11 @@ class CameraState:

         for id in updated_ids:
             updated_obj = tracked_objects[id]
-            thumb_update, significant_update = updated_obj.update(
+            significant_update, zone_change = updated_obj.update(
                 frame_time, current_detections[id]
             )

-            if thumb_update:
+            if significant_update:
                 # ensure this frame is stored in the cache
                 if (
                     updated_obj.thumbnail_data["frame_time"] == frame_time
```
```diff
@@ -498,13 +480,13 @@

             updated_obj.last_updated = frame_time

-            # if it has been more than 5 seconds since the last thumb update
+            # if it has been more than 5 seconds since the last publish
             # and the last update is greater than the last publish or
-            # the object has changed significantly
+            # the object has changed zones
             if (
                 frame_time - updated_obj.last_published > 5
                 and updated_obj.last_updated > updated_obj.last_published
-            ) or significant_update:
+            ) or zone_change:
                 # call event handlers
                 for c in self.callbacks["update"]:
                     c(self.name, updated_obj, frame_time)
```
```diff
@@ -48,7 +48,7 @@ class ObjectTracker:
         del self.tracked_objects[id]
         del self.disappeared[id]

-    # tracks the current position of the object based on the last N bounding boxes
+    # tracks the current position of the object based on the last 10 bounding boxes
     # returns False if the object has moved outside its previous position
     def update_position(self, id, box):
         position = self.positions[id]
```
```diff
@@ -93,51 +93,19 @@

         return True

-    def is_expired(self, id):
-        obj = self.tracked_objects[id]
-        # get the max frames for this label type or the default
-        max_frames = self.detect_config.stationary.max_frames.objects.get(
-            obj["label"], self.detect_config.stationary.max_frames.default
-        )
-
-        # if there is no max_frames for this label type, continue
-        if max_frames is None:
-            return False
-
-        # if the object has exceeded the max_frames setting, deregister
-        if (
-            obj["motionless_count"] - self.detect_config.stationary.threshold
-            > max_frames
-        ):
-            return True
-
     def update(self, id, new_obj):
         self.disappeared[id] = 0
         # update the motionless count if the object has not moved to a new position
         if self.update_position(id, new_obj["box"]):
             self.tracked_objects[id]["motionless_count"] += 1
-            if self.is_expired(id):
-                self.deregister(id)
-                return
         else:
-            # register the first position change and then only increment if
-            # the object was previously stationary
-            if (
-                self.tracked_objects[id]["position_changes"] == 0
-                or self.tracked_objects[id]["motionless_count"]
-                >= self.detect_config.stationary.threshold
-            ):
-                self.tracked_objects[id]["position_changes"] += 1
             self.tracked_objects[id]["motionless_count"] = 0
+            self.tracked_objects[id]["position_changes"] += 1

         self.tracked_objects[id].update(new_obj)

     def update_frame_times(self, frame_time):
-        for id in list(self.tracked_objects.keys()):
+        for id in self.tracked_objects.keys():
             self.tracked_objects[id]["frame_time"] = frame_time
             self.tracked_objects[id]["motionless_count"] += 1
-            if self.is_expired(id):
-                self.deregister(id)

     def match_and_update(self, frame_time, new_objects):
         # group by name
```
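A worked example of the `is_expired` arithmetic removed in this hunk, using the documented defaults from the config reference earlier on this page:

```python
threshold = 50      # frames without a position change before "stationary"
max_frames = 3000   # stationary.max_frames.default from the example config

motionless_count = 3051
# the object is dropped once it has been motionless for threshold + max_frames frames
expired = (motionless_count - threshold) > max_frames
print(expired)  # True: 3001 > 3000
```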
```diff
@@ -184,7 +184,10 @@ class BirdsEyeFrameManager:
         if self.mode == BirdseyeModeEnum.continuous:
             return True

-        if self.mode == BirdseyeModeEnum.motion and motion_box_count > 0:
+        if (
+            self.mode == BirdseyeModeEnum.motion
+            and object_box_count + motion_box_count > 0
+        ):
             return True

         if self.mode == BirdseyeModeEnum.objects and object_box_count > 0:
```
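A sketch of the birdseye activation rules before and after this hunk, simplified to plain strings rather than `BirdseyeModeEnum`:

```python
def birdseye_active(mode: str, object_boxes: int, motion_boxes: int) -> bool:
    if mode == "continuous":
        return True
    if mode == "motion":
        # the v0.10.0-be side also counts object boxes here, so a camera with
        # tracked objects but no raw motion still shows up in birdseye
        return object_boxes + motion_boxes > 0
    if mode == "objects":
        return object_boxes > 0
    return False
```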
```diff
@@ -415,7 +418,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
             ):
                 if birdseye_manager.update(
                     camera,
-                    len([o for o in current_tracked_objects if not o["stationary"]]),
+                    len(current_tracked_objects),
                     len(motion_boxes),
                     frame_time,
                     frame,
```
```diff
@@ -51,6 +51,7 @@ class RecordingMaintainer(threading.Thread):
         self.config = config
         self.recordings_info_queue = recordings_info_queue
         self.stop_event = stop_event
+        self.first_pass = True
         self.recordings_info = defaultdict(list)
         self.end_time_cache = {}
```
```diff
@@ -229,7 +230,7 @@ class RecordingMaintainer(threading.Thread):
                 [
                     o
                     for o in frame[1]
-                    if not o["false_positive"] and o["motionless_count"] == 0
+                    if not o["false_positive"] and o["motionless_count"] > 0
                 ]
             )
```
```diff
@@ -284,7 +285,6 @@ class RecordingMaintainer(threading.Thread):
                 end_time=end_time.timestamp(),
                 duration=duration,
                 motion=motion_count,
-                # TODO: update this to store list of active objects at some point
                 objects=active_count,
             )
         except Exception as e:
```
```diff
@@ -333,6 +333,12 @@ class RecordingMaintainer(threading.Thread):
                     logger.error(e)
             duration = datetime.datetime.now().timestamp() - run_start
             wait_time = max(0, 5 - duration)
+            if wait_time == 0 and not self.first_pass:
+                logger.warning(
+                    "Cache is taking longer than 5 seconds to clear. Your recordings disk may be too slow."
+                )
+            if self.first_pass:
+                self.first_pass = False

         logger.info(f"Exiting recording maintenance...")
```
```diff
@@ -567,9 +567,6 @@ class EventsPerSecond:
         # compute the (approximate) events in the last n seconds
         now = datetime.datetime.now().timestamp()
         seconds = min(now - self._start, last_n_seconds)
-        # avoid divide by zero
-        if seconds == 0:
-            seconds = 1
         return (
             len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
         )
```
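Why the removed guard matters: if `eps()` runs in the same instant the tracker started, `seconds` is 0 and the division raises. A minimal sketch of the guarded version:

```python
def eps(timestamps, start, now, last_n_seconds=10):
    seconds = min(now - start, last_n_seconds)
    if seconds == 0:  # the guard the v0.10.0-be side drops
        seconds = 1
    return len([t for t in timestamps if t > now - last_n_seconds]) / seconds

print(eps([], start=100.0, now=100.0))  # 0.0 instead of ZeroDivisionError
```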
```diff
@@ -604,7 +601,6 @@ def add_mask(mask, mask_img):
     )
     cv2.fillPoly(mask_img, pts=[contour], color=(0))

-
 def load_labels(path, encoding="utf-8"):
     """Loads labels from file (with or without index numbers).
     Args:
```
```diff
@@ -624,7 +620,6 @@ def load_labels(path, encoding="utf-8"):
     else:
         return {index: line.strip() for index, line in enumerate(lines)}

-
 class FrameManager(ABC):
     @abstractmethod
     def create(self, name, size) -> AnyStr:
```
frigate/video.py (372 changes)
```diff
@@ -153,10 +153,10 @@ def capture_frames(
     try:
         frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
     except Exception as e:
-        logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")
+        logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")

         if ffmpeg_process.poll() != None:
-            logger.error(
+            logger.info(
                 f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
             )
             frame_manager.delete(frame_name)
```
```diff
@@ -221,11 +221,12 @@ class CameraWatchdog(threading.Thread):

         if not self.capture_thread.is_alive():
             self.logger.error(
-                f"Ffmpeg process crashed unexpectedly for {self.camera_name}."
+                f"FFMPEG process crashed unexpectedly for {self.camera_name}."
             )
-            self.logger.error("You may have invalid args defined for this camera.")
+            self.logger.error(
+                "The following ffmpeg logs include the last 100 lines prior to exit."
+            )
             self.logpipe.dump()
             self.start_ffmpeg_detect()
         elif now - self.capture_thread.current_frame.value > 20:
```
@@ -491,219 +492,212 @@ def process_frames(
|
||||
logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
|
||||
continue
|
||||
|
||||
if not detection_enabled.value:
|
||||
fps.value = fps_tracker.eps()
|
||||
object_tracker.match_and_update(frame_time, [])
|
||||
detected_objects_queue.put(
|
||||
(camera_name, frame_time, object_tracker.tracked_objects, [], [])
|
||||
)
|
||||
detection_fps.value = object_detector.fps.eps()
|
||||
frame_manager.close(f"{camera_name}{frame_time}")
|
||||
continue
|
||||
|
||||
# look for motion
|
||||
motion_boxes = motion_detector.detect(frame)
|
||||
|
||||
regions = []
|
||||
# get stationary object ids
|
||||
# check every Nth frame for stationary objects
|
||||
# disappeared objects are not stationary
|
||||
# also check for overlapping motion boxes
|
||||
stationary_object_ids = [
|
||||
obj["id"]
|
||||
for obj in object_tracker.tracked_objects.values()
|
||||
# if there hasn't been motion for 10 frames
|
||||
if obj["motionless_count"] >= 10
|
||||
# and it isn't due for a periodic check
|
||||
and (
|
||||
detect_config.stationary_interval == 0
|
||||
or obj["motionless_count"] % detect_config.stationary_interval != 0
|
||||
)
|
||||
# and it hasn't disappeared
|
||||
and object_tracker.disappeared[obj["id"]] == 0
|
||||
# and it doesn't overlap with any current motion boxes
|
||||
and not intersects_any(obj["box"], motion_boxes)
|
||||
]
|
||||
|
||||
# if detection is disabled
|
||||
if not detection_enabled.value:
|
||||
object_tracker.match_and_update(frame_time, [])
|
||||
else:
|
||||
# get stationary object ids
|
||||
# check every Nth frame for stationary objects
|
||||
# disappeared objects are not stationary
|
||||
# also check for overlapping motion boxes
|
||||
stationary_object_ids = [
|
||||
obj["id"]
|
||||
for obj in object_tracker.tracked_objects.values()
|
||||
# if there hasn't been motion for 10 frames
|
||||
if obj["motionless_count"] >= 10
|
||||
# and it isn't due for a periodic check
|
||||
and (
|
||||
detect_config.stationary.interval == 0
|
||||
or obj["motionless_count"] % detect_config.stationary.interval != 0
|
||||
)
|
||||
# and it hasn't disappeared
|
||||
and object_tracker.disappeared[obj["id"]] == 0
|
||||
# and it doesn't overlap with any current motion boxes
|
||||
and not intersects_any(obj["box"], motion_boxes)
|
||||
]
|
||||
# get tracked object boxes that aren't stationary
|
||||
tracked_object_boxes = [
|
||||
obj["box"]
|
||||
for obj in object_tracker.tracked_objects.values()
|
||||
if not obj["id"] in stationary_object_ids
|
||||
]
|
||||
|
||||
# get tracked object boxes that aren't stationary
|
||||
tracked_object_boxes = [
|
||||
obj["box"]
|
||||
for obj in object_tracker.tracked_objects.values()
|
||||
if not obj["id"] in stationary_object_ids
|
||||
]
|
||||
# combine motion boxes with known locations of existing objects
|
||||
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
|
||||
|
||||
# combine motion boxes with known locations of existing objects
|
||||
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
|
||||
region_min_size = max(model_shape[0], model_shape[1])
|
||||
# compute regions
|
||||
regions = [
|
||||
calculate_region(
|
||||
frame_shape,
|
||||
a[0],
|
||||
a[1],
|
||||
a[2],
|
||||
a[3],
|
||||
region_min_size,
|
||||
multiplier=random.uniform(1.2, 1.5),
|
||||
)
|
||||
for a in combined_boxes
|
||||
]
|
||||
|
||||
region_min_size = max(model_shape[0], model_shape[1])
|
||||
# compute regions
|
||||
regions = [
|
||||
# consolidate regions with heavy overlap
|
||||
regions = [
|
||||
calculate_region(
|
||||
frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
|
||||
)
|
||||
for a in reduce_boxes(regions, 0.4)
|
||||
]
|
||||
|
||||
# if starting up, get the next startup scan region
|
||||
if startup_scan_counter < 9:
|
||||
ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
|
||||
ymax = int(frame_shape[0] / 3 + ymin)
|
||||
xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
|
||||
xmax = int(frame_shape[1] / 3 + xmin)
|
||||
regions.append(
|
||||
calculate_region(
|
||||
frame_shape,
|
||||
a[0],
|
||||
a[1],
|
||||
a[2],
|
||||
a[3],
|
||||
region_min_size,
|
||||
multiplier=random.uniform(1.2, 1.5),
|
||||
frame_shape, xmin, ymin, xmax, ymax, region_min_size, multiplier=1.2
|
||||
)
|
||||
for a in combined_boxes
|
||||
]
|
||||
)
|
||||
startup_scan_counter += 1
    # resize regions and detect
    # seed with stationary objects
    detections = [
        (
            obj["label"],
            obj["score"],
            obj["box"],
            obj["area"],
            obj["region"],
        )
        for obj in object_tracker.tracked_objects.values()
        if obj["id"] in stationary_object_ids
    ]

    for region in regions:
        detections.extend(
            detect(
                object_detector,
                frame,
                model_shape,
                region,
                objects_to_track,
                object_filters,
            )
        )

    #########
    # merge objects, check for clipped objects and look again up to 4 times
    #########
    refining = len(regions) > 0
    refine_count = 0
    while refining and refine_count < 4:
        refining = False

        # group by name
        detected_object_groups = defaultdict(lambda: [])
        for detection in detections:
            detected_object_groups[detection[0]].append(detection)

        selected_objects = []
        for group in detected_object_groups.values():
            # apply non-maxima suppression to suppress weak, overlapping bounding boxes
            boxes = [
                (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                for o in group
            ]
            confidences = [o[1] for o in group]
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

            for index in idxs:
                obj = group[index[0]]
                if clipped(obj, frame_shape):
                    box = obj[2]
                    # calculate a new region that will hopefully get the entire object
                    region = calculate_region(
                        frame_shape, box[0], box[1], box[2], box[3], region_min_size
                    )

                    regions.append(region)

                    selected_objects.extend(
                        detect(
                            object_detector,
                            frame,
                            model_shape,
                            region,
                            objects_to_track,
                            object_filters,
                        )
                    )

                    refining = True
                else:
                    selected_objects.append(obj)
        # set the detections list to only include top, complete objects
        # and new detections
        detections = selected_objects

        if refining:
            refine_count += 1
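The NMS call in the refinement loop expects boxes as (x, y, w, h), which is why the (x1, y1, x2, y2) detection boxes are converted first. A minimal standalone sketch of how `cv2.dnn.NMSBoxes` behaves with the same thresholds (the boxes and scores are made up); older OpenCV builds return the kept indices nested one level deep, which is why the loop above indexes `group[index[0]]`:

```python
import cv2

# Illustrative only: two heavily overlapping boxes and one separate box, (x, y, w, h).
boxes = [(100, 100, 50, 80), (104, 102, 50, 80), (400, 300, 60, 60)]
confidences = [0.9, 0.6, 0.8]

# score_threshold=0.5 drops low-confidence boxes; nms_threshold=0.4 suppresses
# boxes that overlap a higher-scoring box too much.
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
print(idxs)  # expected to keep the first and third boxes
```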
    ## drop detections that overlap too much
    consolidated_detections = []

    # if detection was run on this frame, consolidate
    if len(regions) > 0:
        # group by name
        detected_object_groups = defaultdict(lambda: [])
        for detection in detections:
            detected_object_groups[detection[0]].append(detection)

        # loop over detections grouped by label
        for group in detected_object_groups.values():
            # if the group only has 1 item, skip
            if len(group) == 1:
                consolidated_detections.append(group[0])
                continue

            # sort smallest to largest by area
            sorted_by_area = sorted(group, key=lambda g: g[3])

            for current_detection_idx in range(0, len(sorted_by_area)):
                current_detection = sorted_by_area[current_detection_idx][2]
                overlap = 0
                for to_check_idx in range(
                    min(current_detection_idx + 1, len(sorted_by_area)),
                    len(sorted_by_area),
                ):
                    to_check = sorted_by_area[to_check_idx][2]
                    # if 90% of smaller detection is inside of another detection, consolidate
                    if (
                        area(intersection(current_detection, to_check))
                        / area(current_detection)
                        > 0.9
                    ):
                        overlap = 1
                        break
                if overlap == 0:
                    consolidated_detections.append(
                        sorted_by_area[current_detection_idx]
                    )
        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, consolidated_detections)
    # else, just update the frame times for the stationary objects
    else:
        object_tracker.update_frame_times(frame_time)

# add to the queue if not full
if detected_objects_queue.full():
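The 90% containment test above uses frigate's `area()` and `intersection()` helpers. A simplified sketch of the idea with stand-in implementations (illustrative only, not frigate's actual utilities):

```python
# Illustrative stand-ins for frigate's box helpers; boxes are (x1, y1, x2, y2).
def area(box):
    return max(box[2] - box[0], 0) * max(box[3] - box[1], 0)

def intersection(a, b):
    return (max(a[0], b[0]), max(a[1], b[1]), min(a[2], b[2]), min(a[3], b[3]))

small = (120, 120, 200, 200)   # contained detection
large = (100, 100, 300, 300)   # larger detection of the same label
ratio = area(intersection(small, large)) / area(small)
print(ratio > 0.9)  # True: the smaller detection is dropped in favor of the larger one
```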
14794 web/package-lock.json generated
File diff suppressed because it is too large
@@ -1,14 +1,6 @@
import { h } from 'preact';
import { useState } from 'preact/hooks';
import {
  differenceInSeconds,
  fromUnixTime,
  format,
  parseISO,
  startOfHour,
  differenceInMinutes,
  differenceInHours,
} from 'date-fns';
import { addSeconds, differenceInSeconds, fromUnixTime, format, parseISO, startOfHour } from 'date-fns';
import ArrowDropdown from '../icons/ArrowDropdown';
import ArrowDropup from '../icons/ArrowDropup';
import Link from '../components/Link';
@@ -29,31 +21,25 @@ export default function RecordingPlaylist({ camera, recordings, selectedDate, se
events={recording.events}
selected={recording.date === selectedDate}
>
{recording.recordings
  .slice()
  .reverse()
  .map((item, i) => (
{recording.recordings.slice().reverse().map((item, i) => (
  <div className="mb-2 w-full">
    <div
      className={`flex w-full text-md text-white px-8 py-2 mb-2 ${
        i === 0 ? 'border-t border-white border-opacity-50' : ''
      }`}
    >
      <div className="flex-1">
        <Link href={`/recording/${camera}/${recording.date}/${item.hour}`} type="text">
          {item.hour}:00
        </Link>
      </div>
      <div className="flex-1 text-right">{item.events.length} Events</div>
    </div>
    {item.events
      .slice()
      .reverse()
      .map((event) => (
    {item.events.slice().reverse().map((event) => (
      <EventCard camera={camera} event={event} delay={item.delay} />
    ))}
  </div>
))}
</ExpandableList>
);
}
@@ -97,17 +83,8 @@ export function ExpandableList({ title, events = 0, children, selected = false }
export function EventCard({ camera, event, delay }) {
  const apiHost = useApiHost();
  const start = fromUnixTime(event.start_time);
  let duration = 'In Progress';
  if (event.end_time) {
    const end = fromUnixTime(event.end_time);
    const hours = differenceInHours(end, start);
    const minutes = differenceInMinutes(end, start) - hours * 60;
    const seconds = differenceInSeconds(end, start) - hours * 3600 - minutes * 60;
    duration = '';
    if (hours) duration += `${hours}h `;
    if (minutes) duration += `${minutes}m `;
    duration += `${seconds}s`;
  }
  const end = fromUnixTime(event.end_time);
  const duration = addSeconds(new Date(0), differenceInSeconds(end, start));
  const position = differenceInSeconds(start, startOfHour(start));
  const offset = Object.entries(delay)
    .map(([p, d]) => (position > p ? d : 0))
@@ -125,7 +102,7 @@ export function EventCard({ camera, event, delay }) {
<div className="flex-1">
  <div className="text-2xl text-white leading-tight capitalize">{event.label}</div>
  <div className="text-xs md:text-normal text-gray-300">Start: {format(start, 'HH:mm:ss')}</div>
  <div className="text-xs md:text-normal text-gray-300">Duration: {duration}</div>
  <div className="text-xs md:text-normal text-gray-300">Duration: {format(duration, 'mm:ss')}</div>
</div>
<div className="text-lg text-white text-right leading-tight">{(event.top_score * 100).toFixed(1)}%</div>
</div>
@@ -29,8 +29,12 @@ function Camera({ name, conf }) {
const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
const href = `/cameras/${name}`;
const buttons = useMemo(() => {
  return [{ name: 'Events', href: `/events?camera=${name}` }, { name: 'Recordings', href: `/recording/${name}` }];
}, [name]);
  const result = [{ name: 'Events', href: `/events?camera=${name}` }];
  if (conf.record.enabled) {
    result.push({ name: 'Recordings', href: `/recording/${name}` });
  }
  return result;
}, [name, conf.record.enabled]);
const icons = useMemo(
  () => [
    {
@@ -66,9 +66,6 @@ export default function Recording({ camera, date, hour, seconds }) {
this.player.currentTime(seconds);
}
}
// Force playback rate to be correct
const playbackRate = this.player.playbackRate();
this.player.defaultPlaybackRate(playbackRate);
}

return (
@@ -46,7 +46,7 @@ describe('Cameras Route', () => {

expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();

expect(screen.queryAllByText('Recordings')).toHaveLength(2);
expect(screen.queryAllByText('Recordings')).toHaveLength(1);
});

test('buttons toggle detect, clips, and snapshots', async () => {