Compare commits

...

25 Commits

Author SHA1 Message Date
Blake Blackshear
db1255aa7f disable disk sync on startup 2021-12-13 06:51:03 -06:00
Blake Blackshear
609b436ed8 fix migrations 2021-12-13 06:50:06 -06:00
Blake Blackshear
95bdf9fe34 check for apex dir 2021-12-12 10:27:01 -06:00
Ryan McLean
251d29aa38 #2117 change entered_zones from set to list so that they are not automatically alphabetically ordered (#2212) 2021-12-12 09:29:57 -06:00
Justin Goette
156e1a4dc2 Allow for ".yaml" (#2244)
* allow for ".yaml"

* remove unused import
2021-12-12 09:27:05 -06:00
Matt Clayton
a5c13e7455 Add temperature of coral tpu to telemetry mqtt message 2021-12-12 09:22:32 -06:00
Blake Blackshear
fcb4aaef0d limit vod response cache 2021-12-12 09:21:45 -06:00
Blake Blackshear
589432bc89 update docs 2021-12-12 09:21:45 -06:00
Blake Blackshear
b19a02888a expire overlapping segments based on mode 2021-12-12 09:21:45 -06:00
Blake Blackshear
18fd50dfce store objects and motion counts in the db 2021-12-12 09:21:45 -06:00
Blake Blackshear
df0246aed8 warn when retention mismatch 2021-12-12 09:21:45 -06:00
Blake Blackshear
cbb2882123 refactor segment stats logic 2021-12-12 09:21:45 -06:00
Blake Blackshear
9f18629df3 switch to retain config instead of retain_days 2021-12-12 09:21:45 -06:00
Blake Blackshear
63f8034e46 pass processed tracked objects 2021-12-12 09:21:45 -06:00
Blake Blackshear
f3efc0667f retain frame data for recording maintenance 2021-12-12 09:21:45 -06:00
Blake Blackshear
af001321a8 fix process_clip 2021-12-12 09:21:45 -06:00
Blake Blackshear
92e08b92f5 sync recordings with disk once on startup 2021-12-12 09:21:45 -06:00
Blake Blackshear
26241b0877 no need to expire recordings every minute 2021-12-12 09:21:45 -06:00
Blake Blackshear
c1155af169 ensure cache copies when events have ended 2021-11-21 09:43:37 -06:00
Blake Blackshear
77c1f1bb1b cleanup missing files from database once per hour 2021-11-21 07:55:35 -06:00
Blake Blackshear
ae3c01fe2d handle missing file edge case 2021-11-21 07:26:31 -06:00
Blake Blackshear
7a2a85d253 log error messages on vod endpoints 2021-11-21 07:25:36 -06:00
Blake Blackshear
77c66d4e49 ensure duration > 0 for segments 2021-11-21 07:25:01 -06:00
Blake Blackshear
494e5ac4ec use snapshot url to support in progress events 2021-11-20 09:52:02 -06:00
Blake Blackshear
63b7465452 ensure stationary interval is greater than 0 2021-11-20 09:15:03 -06:00
15 changed files with 377 additions and 68 deletions

View File

@@ -58,7 +58,7 @@ http {
# vod caches
vod_metadata_cache metadata_cache 512m;
vod_mapping_cache mapping_cache 5m;
vod_mapping_cache mapping_cache 5m 10m;
# gzip manifests
gzip on;

View File

@@ -224,15 +224,23 @@ motion:
record:
# Optional: Enable recording (default: shown below)
enabled: False
# Optional: Number of days to retain recordings regardless of events (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in events section below
# if you only want to retain recordings of events.
retain_days: 0
# Optional: Retention settings for recording
retain:
# Optional: Number of days to retain recordings regardless of events (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in events section below
# if you only want to retain recordings of events.
days: 0
# Optional: Mode for retention. Available options are: all, motion, and active_objects
# all - save all recording segments regardless of activity
# motion - save all recording segments with any detected motion
# active_objects - save all recording segments with active/moving objects
# NOTE: this mode only applies when the days setting above is greater than 0
mode: all
# Optional: Event recording settings
events:
# Optional: Maximum length of time to retain video during long events. (default: shown below)
# NOTE: If an object is being tracked for longer than this amount of time, the retained recordings
# will be the last x seconds of the event unless retain_days under record is > 0.
# will be the last x seconds of the event unless retain->days under record is > 0.
max_seconds: 300
# Optional: Number of seconds before the event to include (default: shown below)
pre_capture: 5
@@ -247,6 +255,16 @@ record:
retain:
# Required: Default retention days (default: shown below)
default: 10
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for events regardless of activity
# motion - save all recording segments for events with any detected motion
# active_objects - save all recording segments for events with active/moving objects
#
# NOTE: If the retain mode for the camera is more restrictive than the mode configured
# here, the segments will already be gone by the time this mode is applied.
# For example, if the camera retain mode is "motion", the segments without motion are
# never stored, so setting the mode to "all" here won't bring them back.
mode: active_objects
# Optional: Per object retention days
objects:
person: 15
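
The two retention layers can now be tuned independently. A sketch of a combined setup under the new schema (the values here are illustrative, not defaults): continuous recordings are kept for 3 days but only for segments with motion, while event segments with active objects are kept 10 days (30 for `person`):

```yaml
record:
  enabled: True
  retain:
    days: 3
    mode: motion
  events:
    retain:
      default: 10
      mode: active_objects
      objects:
        person: 30
```

As the NOTE in the hunk warns, an event retain mode of `all` cannot recover segments that a more restrictive camera-level mode never stored in the first place.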

View File

@@ -14,12 +14,11 @@ If you only used clips in previous versions with recordings disabled, you can us
```yaml
record:
enabled: True
retain_days: 0
events:
retain:
default: 10
```
This configuration will retain recording segments that overlap with events for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
When `retain_days` is set to `0`, segments will be deleted from the cache if no events are in progress
When `retain_days` is set to `0`, segments will be deleted from the cache if no events are in progress.
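
Note that `events -> retain -> mode` now defaults to `active_objects` (see the `RetainConfig` change in `frigate/config.py` below), which is why the updated sentence adds "and have active tracked objects". A sketch of how to restore the old keep-everything-that-overlaps behavior under the new schema, if that is preferred:

```yaml
record:
  enabled: True
  events:
    retain:
      default: 10
      mode: all
```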

View File

@@ -67,6 +67,12 @@ class FrigateApp:
def init_config(self):
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
user_config = FrigateConfig.parse_file(config_file)
self.config = user_config.runtime_config
@@ -108,6 +114,9 @@ class FrigateApp:
maxsize=len(self.config.cameras.keys()) * 2
)
# Queue for recordings info
self.recordings_info_queue = mp.Queue()
def init_database(self):
# Migrate DB location
old_db_path = os.path.join(CLIPS_DIR, "frigate.db")
@@ -206,6 +215,7 @@ class FrigateApp:
self.event_queue,
self.event_processed_queue,
self.video_output_queue,
self.recordings_info_queue,
self.stop_event,
)
self.detected_frames_processor.start()
@@ -273,7 +283,9 @@ class FrigateApp:
self.event_cleanup.start()
def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer = RecordingMaintainer(
self.config, self.recordings_info_queue, self.stop_event
)
self.recording_maintainer.start()
def start_recording_cleanup(self):
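
A self-contained sketch of the config-file resolution order the `init_config` hunk implements (the function name here is illustrative):

```python
import os

def resolve_config_file() -> str:
    # Default path, overridable via the CONFIG_FILE environment variable.
    config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
    # Prefer a ".yaml" sibling when one exists on disk.
    config_file_yaml = config_file.replace(".yml", ".yaml")
    if os.path.isfile(config_file_yaml):
        return config_file_yaml
    return config_file
```

One subtlety: `str.replace` rewrites every `.yml` occurrence in the path, not just the extension, though with the default path that only ever matches the extension.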

View File

@@ -12,7 +12,7 @@ import yaml
from pydantic import BaseModel, Extra, Field, validator
from pydantic.fields import PrivateAttr
from frigate.const import BASE_DIR, CACHE_DIR
from frigate.const import BASE_DIR, CACHE_DIR, YAML_EXT
from frigate.edgetpu import load_labels
from frigate.util import create_mask, deep_merge
@@ -65,8 +65,17 @@ class MqttConfig(FrigateBaseModel):
return v
class RetainModeEnum(str, Enum):
all = "all"
motion = "motion"
active_objects = "active_objects"
class RetainConfig(FrigateBaseModel):
default: float = Field(default=10, title="Default retention period.")
mode: RetainModeEnum = Field(
default=RetainModeEnum.active_objects, title="Retain mode."
)
objects: Dict[str, float] = Field(
default_factory=dict, title="Object retention period."
)
@@ -88,9 +97,18 @@ class EventsConfig(FrigateBaseModel):
)
class RecordRetainConfig(FrigateBaseModel):
days: float = Field(default=0, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
retain_days: float = Field(default=0, title="Recording retention period in days.")
# deprecated - to be removed in a future version
retain_days: Optional[float] = Field(title="Recording retention period in days.")
retain: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, title="Record retention settings."
)
events: EventsConfig = Field(
default_factory=EventsConfig, title="Event specific settings."
)
@@ -154,7 +172,8 @@ class DetectConfig(FrigateBaseModel):
title="Maximum number of frames the object can dissapear before detection ends."
)
stationary_interval: Optional[int] = Field(
title="Frame interval for checking stationary objects."
title="Frame interval for checking stationary objects.",
ge=1,
)
@@ -809,6 +828,25 @@ class FrigateConfig(FrigateBaseModel):
f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
)
# backwards compatibility for retain_days
if not camera_config.record.retain_days is None:
logger.warning(
"The 'retain_days' config option has been DEPRECATED and will be removed in a future version. Please use the 'days' setting under 'retain'"
)
if camera_config.record.retain.days == 0:
camera_config.record.retain.days = camera_config.record.retain_days
# warning if the higher level record mode is potentially more restrictive than the events
if (
camera_config.record.retain.days != 0
and camera_config.record.retain.mode != RetainModeEnum.all
and camera_config.record.events.retain.mode
!= camera_config.record.retain.mode
):
logger.warning(
f"Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
)
config.cameras[name] = camera_config
return config
@@ -826,7 +864,7 @@ class FrigateConfig(FrigateBaseModel):
with open(config_file) as f:
raw_config = f.read()
if config_file.endswith(".yml"):
if config_file.endswith(YAML_EXT):
config = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config = json.loads(raw_config)
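
The deprecation shim only copies the old value across when the new key is still at its `0` default, so an explicit new-style `days` wins. A minimal sketch of that precedence (values illustrative):

```python
# Simplified from the backwards-compatibility hunk above.
retain_days = 7      # deprecated top-level record.retain_days, if the user set it
retain_days_new = 0  # record.retain.days as parsed (0 is the default)

if retain_days is not None:
    # a deprecation warning is logged either way
    if retain_days_new == 0:
        retain_days_new = retain_days

print(retain_days_new)  # 7 -- an explicit retain.days would have been kept instead
```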

View File

@@ -2,3 +2,4 @@ BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
RECORD_DIR = f"{BASE_DIR}/recordings"
CACHE_DIR = "/tmp/cache"
YAML_EXT = (".yaml", ".yml")
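
`YAML_EXT` can be a tuple because `str.endswith` accepts a tuple of suffixes, which is what lets the single `config_file.endswith(YAML_EXT)` check in `frigate/config.py` above cover both extensions:

```python
YAML_EXT = (".yaml", ".yml")

print("/config/config.yml".endswith(YAML_EXT))   # True
print("/config/config.yaml".endswith(YAML_EXT))  # True
print("/config/config.json".endswith(YAML_EXT))  # False
```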

View File

@@ -658,10 +658,15 @@ def vod_ts(camera, start_ts, end_ts):
# Determine if we need to end the last clip early
if recording.end_time > end_ts:
duration -= int((recording.end_time - end_ts) * 1000)
clips.append(clip)
durations.append(duration)
if duration > 0:
clips.append(clip)
durations.append(duration)
else:
logger.warning(f"Recording clip is missing or empty: {recording.path}")
if not clips:
logger.error("No recordings found for the requested time range")
return "No recordings found.", 404
hour_ago = datetime.now() - timedelta(hours=1)
@@ -690,10 +695,12 @@ def vod_event(id):
try:
event: Event = Event.get(Event.id == id)
except DoesNotExist:
logger.error(f"Event not found: {id}")
return "Event not found.", 404
if not event.has_clip:
return "Clip not available", 404
logger.error(f"Event does not have recordings: {id}")
return "Recordings not available", 404
clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
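
Worked numbers for the trimming guard, assuming a hypothetical 10-second segment whose `recording.end_time` runs 4 seconds past the requested `end_ts`:

```python
duration = 10_000                   # full segment length in ms
overhang_s = 4.0                    # recording.end_time - end_ts
duration -= int(overhang_s * 1000)
print(duration)                     # 6000 -> clip is kept; a result <= 0 would
                                    # be skipped with the "missing or empty" warning
```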

View File

@@ -27,3 +27,5 @@ class Recordings(Model):
start_time = DateTimeField()
end_time = DateTimeField()
duration = FloatField()
motion = IntegerField(null=True)
objects = IntegerField(null=True)

View File

@@ -71,7 +71,7 @@ class TrackedObject:
self.camera_config = camera_config
self.frame_cache = frame_cache
self.current_zones = []
self.entered_zones = set()
self.entered_zones = []
self.false_positive = True
self.has_clip = False
self.has_snapshot = False
@@ -147,7 +147,8 @@ class TrackedObject:
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(self, zone.filters):
current_zones.append(name)
self.entered_zones.add(name)
if name not in self.entered_zones:
self.entered_zones.append(name)
# if the zones changed, signal an update
if not self.false_positive and set(self.current_zones) != set(current_zones):
@@ -176,8 +177,9 @@ class TrackedObject:
"box": self.obj_data["box"],
"area": self.obj_data["area"],
"region": self.obj_data["region"],
"motionless_count": self.obj_data["motionless_count"],
"current_zones": self.current_zones.copy(),
"entered_zones": list(self.entered_zones).copy(),
"entered_zones": self.entered_zones.copy(),
"has_clip": self.has_clip,
"has_snapshot": self.has_snapshot,
}
@@ -584,6 +586,7 @@ class TrackedObjectProcessor(threading.Thread):
event_queue,
event_processed_queue,
video_output_queue,
recordings_info_queue,
stop_event,
):
threading.Thread.__init__(self)
@@ -595,6 +598,7 @@ class TrackedObjectProcessor(threading.Thread):
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.video_output_queue = video_output_queue
self.recordings_info_queue = recordings_info_queue
self.stop_event = stop_event
self.camera_states: Dict[str, CameraState] = {}
self.frame_manager = SharedMemoryFrameManager()
@@ -729,7 +733,7 @@ class TrackedObjectProcessor(threading.Thread):
# if there are required zones and there is no overlap
required_zones = snapshot_config.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
logger.debug(
f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
)
@@ -770,7 +774,7 @@ class TrackedObjectProcessor(threading.Thread):
def should_mqtt_snapshot(self, camera, obj: TrackedObject):
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].mqtt.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
logger.debug(
f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
)
@@ -813,11 +817,26 @@ class TrackedObjectProcessor(threading.Thread):
frame_time, current_tracked_objects, motion_boxes, regions
)
tracked_objects = [
o.to_dict() for o in camera_state.tracked_objects.values()
]
self.video_output_queue.put(
(
camera,
frame_time,
current_tracked_objects,
tracked_objects,
motion_boxes,
regions,
)
)
# send info on this frame to the recordings maintainer
self.recordings_info_queue.put(
(
camera,
frame_time,
tracked_objects,
motion_boxes,
regions,
)
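
A minimal illustration of the set-to-list change for `entered_zones` (zone names are hypothetical). Per #2117, the set lost the order in which zones were entered; the list plus a membership check keeps first-entry order while still deduplicating:

```python
entered_zones = []
for name in ["yard", "porch", "driveway", "porch"]:
    if name not in entered_zones:  # same dedupe the set provided
        entered_zones.append(name)

print(entered_zones)  # ['yard', 'porch', 'driveway'] -- entry order preserved
```

Because `entered_zones` is now a plain list, the required-zones checks above wrap it in `set()` before intersecting it with `required_zones`.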

View File

@@ -1,22 +1,25 @@
import datetime
import time
import itertools
import logging
import multiprocessing as mp
import os
import queue
import random
import shutil
import string
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
from peewee import JOIN, DoesNotExist
from frigate.config import FrigateConfig
from frigate.config import RetainModeEnum, FrigateConfig
from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util import area
logger = logging.getLogger(__name__)
@@ -40,22 +43,28 @@ def remove_empty_directories(directory):
class RecordingMaintainer(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
def __init__(
self, config: FrigateConfig, recordings_info_queue: mp.Queue, stop_event
):
threading.Thread.__init__(self)
self.name = "recording_maint"
self.config = config
self.recordings_info_queue = recordings_info_queue
self.stop_event = stop_event
self.first_pass = True
self.recordings_info = defaultdict(list)
self.end_time_cache = {}
def move_files(self):
cache_files = [
d
for d in os.listdir(CACHE_DIR)
if os.path.isfile(os.path.join(CACHE_DIR, d))
and d.endswith(".mp4")
and not d.startswith("clip_")
]
cache_files = sorted(
[
d
for d in os.listdir(CACHE_DIR)
if os.path.isfile(os.path.join(CACHE_DIR, d))
and d.endswith(".mp4")
and not d.startswith("clip_")
]
)
files_in_use = []
for process in psutil.process_iter():
@@ -93,16 +102,22 @@ class RecordingMaintainer(threading.Thread):
keep_count = 5
for camera in grouped_recordings.keys():
if len(grouped_recordings[camera]) > keep_count:
sorted_recordings = sorted(
grouped_recordings[camera], key=lambda i: i["start_time"]
)
to_remove = sorted_recordings[:-keep_count]
to_remove = grouped_recordings[camera][:-keep_count]
for f in to_remove:
Path(f["cache_path"]).unlink(missing_ok=True)
self.end_time_cache.pop(f["cache_path"], None)
grouped_recordings[camera] = sorted_recordings[-keep_count:]
grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
for camera, recordings in grouped_recordings.items():
# clear out all the recording info for old frames
while (
len(self.recordings_info[camera]) > 0
and self.recordings_info[camera][0][0]
< recordings[0]["start_time"].timestamp()
):
self.recordings_info[camera].pop(0)
# get all events with the end time after the start of the oldest cache file
# or with end_time None
events: Event = (
@@ -110,7 +125,7 @@ class RecordingMaintainer(threading.Thread):
.where(
Event.camera == camera,
(Event.end_time == None)
| (Event.end_time >= recordings[0]["start_time"]),
| (Event.end_time >= recordings[0]["start_time"].timestamp()),
Event.has_clip,
)
.order_by(Event.start_time)
@@ -151,12 +166,12 @@ class RecordingMaintainer(threading.Thread):
Path(cache_path).unlink(missing_ok=True)
continue
# if cached file's start_time is earlier than the retain_days for the camera
# if cached file's start_time is earlier than the retain days for the camera
if start_time <= (
(
datetime.datetime.now()
- datetime.timedelta(
days=self.config.cameras[camera].record.retain_days
days=self.config.cameras[camera].record.retain.days
)
)
):
@@ -167,15 +182,23 @@ class RecordingMaintainer(threading.Thread):
# and remove this segment
if event.start_time > end_time.timestamp():
overlaps = False
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= start_time:
if (
event.end_time is None
or event.end_time >= start_time.timestamp()
):
overlaps = True
break
if overlaps:
record_mode = self.config.cameras[
camera
].record.events.retain.mode
# move from cache to recordings immediately
self.store_segment(
camera,
@@ -183,14 +206,57 @@ class RecordingMaintainer(threading.Thread):
end_time,
duration,
cache_path,
record_mode,
)
# else retain_days includes this segment
# else retain days includes this segment
else:
record_mode = self.config.cameras[camera].record.retain.mode
self.store_segment(
camera, start_time, end_time, duration, cache_path
camera, start_time, end_time, duration, cache_path, record_mode
)
def store_segment(self, camera, start_time, end_time, duration, cache_path):
def segment_stats(self, camera, start_time, end_time):
active_count = 0
motion_count = 0
for frame in self.recordings_info[camera]:
# frame is after end time of segment
if frame[0] > end_time.timestamp():
break
# frame is before start time of segment
if frame[0] < start_time.timestamp():
continue
active_count += len(
[
o
for o in frame[1]
if not o["false_positive"] and o["motionless_count"] > 0
]
)
motion_count += sum([area(box) for box in frame[2]])
return (motion_count, active_count)
def store_segment(
self,
camera,
start_time,
end_time,
duration,
cache_path,
store_mode: RetainModeEnum,
):
motion_count, active_count = self.segment_stats(camera, start_time, end_time)
# check if the segment shouldn't be stored
if (store_mode == RetainModeEnum.motion and motion_count == 0) or (
store_mode == RetainModeEnum.active_objects and active_count == 0
):
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
return
directory = os.path.join(RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera)
if not os.path.exists(directory):
@@ -218,6 +284,8 @@ class RecordingMaintainer(threading.Thread):
start_time=start_time.timestamp(),
end_time=end_time.timestamp(),
duration=duration,
motion=motion_count,
objects=active_count,
)
except Exception as e:
logger.error(f"Unable to store recording segment {cache_path}")
@@ -232,6 +300,30 @@ class RecordingMaintainer(threading.Thread):
wait_time = 5
while not self.stop_event.wait(wait_time):
run_start = datetime.datetime.now().timestamp()
# empty the recordings info queue
while True:
try:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = self.recordings_info_queue.get(False)
if self.config.cameras[camera].record.enabled:
self.recordings_info[camera].append(
(
frame_time,
current_tracked_objects,
motion_boxes,
regions,
)
)
except queue.Empty:
break
try:
self.move_files()
except Exception as e:
@@ -271,7 +363,7 @@ class RecordingCleanup(threading.Thread):
logger.debug("Start deleted cameras.")
# Handle deleted cameras
expire_days = self.config.record.retain_days
expire_days = self.config.record.retain.days
expire_before = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
@@ -297,7 +389,7 @@ class RecordingCleanup(threading.Thread):
datetime.datetime.now()
- datetime.timedelta(seconds=config.record.events.max_seconds)
).timestamp()
expire_days = config.record.retain_days
expire_days = config.record.retain.days
expire_before = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
@@ -328,6 +420,7 @@ class RecordingCleanup(threading.Thread):
)
# loop over recordings and see if they overlap with any non-expired events
# TODO: expire segments based on segment stats according to config
event_start = 0
deleted_recordings = set()
for recording in recordings.objects().iterator():
@@ -355,8 +448,19 @@ class RecordingCleanup(threading.Thread):
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings outside of the retention window
if not keep:
# Delete recordings outside of the retention window or based on the retention mode
if (
not keep
or (
config.record.events.retain.mode == RetainModeEnum.motion
and recording.motion == 0
)
or (
config.record.events.retain.mode
== RetainModeEnum.active_objects
and recording.objects == 0
)
):
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
@@ -373,14 +477,14 @@ class RecordingCleanup(threading.Thread):
default_expire = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * self.config.record.retain_days
- SECONDS_IN_DAY * self.config.record.retain.days
)
delete_before = {}
for name, camera in self.config.cameras.items():
delete_before[name] = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * camera.record.retain_days
- SECONDS_IN_DAY * camera.record.retain.days
)
# find all the recordings older than the oldest recording in the db
@@ -405,21 +509,52 @@ class RecordingCleanup(threading.Thread):
for f in files_to_check:
p = Path(f)
if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
p.unlink(missing_ok=True)
try:
if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
p.unlink(missing_ok=True)
except FileNotFoundError:
logger.warning(f"Attempted to expire missing file: {f}")
logger.debug("End expire files (legacy).")
def sync_recordings(self):
logger.debug("Start sync recordings.")
# get all recordings in the db
recordings: Recordings = Recordings.select()
# get all recordings files on disk
process = sp.run(
["find", RECORD_DIR, "-type", "f"],
capture_output=True,
text=True,
)
files_on_disk = process.stdout.splitlines()
recordings_to_delete = []
for recording in recordings.objects().iterator():
if not recording.path in files_on_disk:
recordings_to_delete.append(recording.id)
logger.debug(
f"Deleting {len(recordings_to_delete)} recordings with missing files"
)
Recordings.delete().where(Recordings.id << recordings_to_delete).execute()
logger.debug("End sync recordings.")
def run(self):
# Expire recordings every minute, clean directories every hour.
# on startup sync recordings with disk (disabled due to too much CPU usage)
# self.sync_recordings()
# Expire tmp clips every minute, recordings and clean directories every hour.
for counter in itertools.cycle(range(60)):
if self.stop_event.wait(60):
logger.info(f"Exiting recording cleanup...")
break
self.expire_recordings()
self.clean_tmp_clips()
if counter == 0:
self.expire_recordings()
self.expire_files()
remove_empty_directories(RECORD_DIR)
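
A condensed sketch of the keep/drop decision `store_segment` now applies, using the counts produced by `segment_stats` (simplified from the hunks above):

```python
from enum import Enum

class RetainModeEnum(str, Enum):
    all = "all"
    motion = "motion"
    active_objects = "active_objects"

def should_discard(mode: RetainModeEnum, motion_count: int, active_count: int) -> bool:
    # "all" keeps every segment; "motion" requires any motion area in the
    # segment; "active_objects" requires at least one active true positive.
    return (mode == RetainModeEnum.motion and motion_count == 0) or (
        mode == RetainModeEnum.active_objects and active_count == 0
    )
```

Segments that overlap an event are stored under `record.events.retain.mode`, everything else under `record.retain.mode`, and the counts are persisted to the new `motion`/`objects` columns so `expire_recordings` can re-apply the mode later.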

View File

@@ -4,6 +4,7 @@ import threading
import time
import psutil
import shutil
import os
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
@@ -31,6 +32,28 @@ def get_fs_type(path):
return fsType
def read_temperature(path):
if os.path.isfile(path):
with open(path) as f:
line = f.readline().strip()
return int(line) / 1000
return None
def get_temperatures():
temps = {}
# Get temperatures for all attached Corals
base = "/sys/class/apex/"
if os.path.isdir(base):
for apex in os.listdir(base):
temp = read_temperature(os.path.join(base, apex, "temp"))
if temp is not None:
temps[apex] = temp
return temps
def stats_snapshot(stats_tracking):
camera_metrics = stats_tracking["camera_metrics"]
stats = {}
@@ -61,6 +84,7 @@ def stats_snapshot(stats_tracking):
"uptime": (int(time.time()) - stats_tracking["started"]),
"version": VERSION,
"storage": {},
"temperatures": get_temperatures(),
}
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
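
The `temp` sysfs file holds millidegrees Celsius, which `read_temperature` divides down to degrees; the resulting map is keyed by the apex device directory name (the `apex_0` name and reading below are illustrative):

```python
# /sys/class/apex/apex_0/temp might contain "36850\n"
line = "36850"
print(int(line) / 1000)  # 36.85 -> published as stats["temperatures"]["apex_0"]
```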

View File

@@ -28,17 +28,19 @@ SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.create_model(Recordings)
def add_index():
# First add the index here, because there is a bug in peewee_migrate
# when trying to create a multi-column index in the same migration
# as the table: https://github.com/klen/peewee_migrate/issues/19
Recordings.add_index("start_time", "end_time")
Recordings.create_table()
migrator.python(add_index)
migrator.sql(
'CREATE TABLE IF NOT EXISTS "recordings" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "camera" VARCHAR(20) NOT NULL, "path" VARCHAR(255) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "duration" REAL NOT NULL)'
)
migrator.sql(
'CREATE INDEX IF NOT EXISTS "recordings_camera" ON "recordings" ("camera")'
)
migrator.sql(
'CREATE UNIQUE INDEX IF NOT EXISTS "recordings_path" ON "recordings" ("path")'
)
migrator.sql(
'CREATE INDEX IF NOT EXISTS "recordings_start_time_end_time" ON "recordings" (start_time, end_time)'
)
def rollback(migrator, database, fake=False, **kwargs):
migrator.remove_model(Recordings)
pass

View File

@@ -0,0 +1,47 @@
"""Peewee migrations -- 004_add_bbox_region_area.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import datetime as dt
import peewee as pw
from playhouse.sqlite_ext import *
from decimal import ROUND_HALF_EVEN
from frigate.models import Recordings
try:
import playhouse.postgres_ext as pw_pext
except ImportError:
pass
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.add_fields(
Recordings,
objects=pw.IntegerField(null=True),
motion=pw.IntegerField(null=True),
)
def rollback(migrator, database, fake=False, **kwargs):
migrator.remove_fields(Recordings, ["objects", "motion"])

View File

@@ -162,9 +162,12 @@ class ProcessClip:
)
total_regions += len(regions)
total_motion_boxes += len(motion_boxes)
top_score = 0
for id, obj in self.camera_state.tracked_objects.items():
if not obj.false_positive:
object_ids.add(id)
if obj.top_score > top_score:
top_score = obj.top_score
total_frames += 1
@@ -175,6 +178,7 @@ class ProcessClip:
"total_motion_boxes": total_motion_boxes,
"true_positive_objects": len(object_ids),
"total_frames": total_frames,
"top_score": top_score,
}
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
@@ -277,6 +281,7 @@ def process(path, label, output, debug_path):
frigate_config = FrigateConfig(**json_config)
runtime_config = frigate_config.runtime_config
runtime_config.cameras["camera"].create_ffmpeg_cmds()
process_clip = ProcessClip(c, frame_shape, runtime_config)
process_clip.load_frames()

View File

@@ -199,7 +199,7 @@ export default function Event({ eventId, close, scrollRef }) {
<img
src={
data.has_snapshot
? `${apiHost}/clips/${data.camera}-${eventId}.jpg`
? `${apiHost}/api/events/${eventId}/snapshot.jpg`
: `data:image/jpeg;base64,${data.thumbnail}`
}
alt={`${data.label} at ${(data.top_score * 100).toFixed(1)}% confidence`}