Forked from GitHub/frigate
Refactor recordings retention to be based on review items (#13355)
* Refactor recordings config to be based off of review items
* Update object processing logic for when an event is created
* Migrate to deciding recording retention based on review items
* Refactor recording expiration to be based off of review items
* Remove remainder of recording events access
* Handle migration automatically
* Update version and cleanup
* Update docs
* Clarify docs
* Cleanup
* Target camera config
* Safely access all fields
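As a rough orientation before the hunks below: per-event retention (`record.events.retain`) is replaced by per-review-item retention on `record.alerts` and `record.detections`. A minimal sketch of the two layouts follows; the keys mirror the `RecordConfig`/`EventsConfig` changes in this diff, while the day values are made up for illustration.

```python
# Sketch only: old vs. new retention layout after this refactor.
# Day values are examples, not defaults.
old_style = {
    "record": {
        "retain": {"days": 3},                   # continuous recording
        "events": {"retain": {"default": 10}},   # per-event clips
    }
}

new_style = {
    "record": {
        "retain": {"days": 3},                   # continuous recording
        "alerts": {"retain": {"days": 10}},      # review items: alerts
        "detections": {"retain": {"days": 7}},   # review items: detections
    }
}
```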
@@ -296,12 +296,9 @@ class RetainModeEnum(str, Enum):
    active_objects = "active_objects"


class RetainConfig(FrigateBaseModel):
    default: float = Field(default=10, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
    objects: Dict[str, float] = Field(
        default_factory=dict, title="Object retention period."
    )


class RecordRetainConfig(FrigateBaseModel):
    days: float = Field(default=0, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")


class EventsConfig(FrigateBaseModel):
@@ -309,18 +306,9 @@ class EventsConfig(FrigateBaseModel):
        default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE
    )
    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
    objects: Optional[List[str]] = Field(
        None,
        title="List of objects to be detected in order to save the event.",
    retain: RecordRetainConfig = Field(
        default_factory=RecordRetainConfig, title="Event retention settings."
    )
    retain: RetainConfig = Field(
        default_factory=RetainConfig, title="Event retention settings."
    )


class RecordRetainConfig(FrigateBaseModel):
    days: float = Field(default=0, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")


class RecordExportConfig(FrigateBaseModel):
@@ -355,8 +343,11 @@ class RecordConfig(FrigateBaseModel):
    retain: RecordRetainConfig = Field(
        default_factory=RecordRetainConfig, title="Record retention settings."
    )
    events: EventsConfig = Field(
        default_factory=EventsConfig, title="Event specific settings."
    detections: EventsConfig = Field(
        default_factory=EventsConfig, title="Detection specific retention settings."
    )
    alerts: EventsConfig = Field(
        default_factory=EventsConfig, title="Alert specific retention settings."
    )
    export: RecordExportConfig = Field(
        default_factory=RecordExportConfig, title="Recording Export Config"
@@ -924,6 +915,14 @@ class CameraFfmpegConfig(FfmpegConfig):
        return v


class RetainConfig(FrigateBaseModel):
    default: float = Field(default=10, title="Default retention period.")
    mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
    objects: Dict[str, float] = Field(
        default_factory=dict, title="Object retention period."
    )


class SnapshotsConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Snapshots enabled.")
    clean_copy: bool = Field(
@@ -1278,10 +1277,19 @@ def verify_recording_retention(camera_config: CameraConfig) -> None:
    if (
        camera_config.record.retain.days != 0
        and rank_map[camera_config.record.retain.mode]
        > rank_map[camera_config.record.events.retain.mode]
        > rank_map[camera_config.record.alerts.retain.mode]
    ):
        logger.warning(
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and alert retention is configured for {camera_config.record.alerts.retain.mode}. The more restrictive retention policy will be applied."
        )

    if (
        camera_config.record.retain.days != 0
        and rank_map[camera_config.record.retain.mode]
        > rank_map[camera_config.record.detections.retain.mode]
    ):
        logger.warning(
            f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and detection retention is configured for {camera_config.record.detections.retain.mode}. The more restrictive retention policy will be applied."
        )
@@ -1429,7 +1437,7 @@ class FrigateConfig(FrigateBaseModel):
        default_factory=TimestampStyleConfig,
        title="Global timestamp style configuration.",
    )
    version: Optional[float] = Field(default=None, title="Current config version.")
    version: Optional[str] = Field(default=None, title="Current config version.")

    def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig:
        """Merge camera config with globals."""
@@ -68,7 +68,10 @@ class EventCleanup(threading.Thread):
    def expire(self, media_type: EventCleanupType) -> list[str]:
        ## Expire events from unlisted cameras based on the global config
        if media_type == EventCleanupType.clips:
            retain_config = self.config.record.events.retain
            expire_days = max(
                self.config.record.alerts.retain.days,
                self.config.record.detections.retain.days,
            )
            file_extension = None  # mp4 clips are no longer stored in /clips
            update_params = {"has_clip": False}
        else:

@@ -82,7 +85,11 @@ class EventCleanup(threading.Thread):
        # loop over object types in db
        for event in distinct_labels:
            # get expiration time for this label
            expire_days = retain_config.objects.get(event.label, retain_config.default)
            if media_type == EventCleanupType.snapshots:
                expire_days = retain_config.objects.get(
                    event.label, retain_config.default
                )

            expire_after = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
@@ -132,7 +139,10 @@ class EventCleanup(threading.Thread):
        ## Expire events from cameras based on the camera config
        for name, camera in self.config.cameras.items():
            if media_type == EventCleanupType.clips:
                retain_config = camera.record.events.retain
                expire_days = max(
                    camera.record.alerts.retain.days,
                    camera.record.detections.retain.days,
                )
            else:
                retain_config = camera.snapshots.retain

@@ -142,9 +152,11 @@ class EventCleanup(threading.Thread):
            # loop over object types in db
            for event in distinct_labels:
                # get expiration time for this label
                expire_days = retain_config.objects.get(
                    event.label, retain_config.default
                )
                if media_type == EventCleanupType.snapshots:
                    expire_days = retain_config.objects.get(
                        event.label, retain_config.default
                    )

                expire_after = (
                    datetime.datetime.now() - datetime.timedelta(days=expire_days)
                ).timestamp()
@@ -5,7 +5,7 @@ from multiprocessing.synchronize import Event as MpEvent
from typing import Dict

from frigate.comms.events_updater import EventEndPublisher, EventUpdateSubscriber
from frigate.config import EventsConfig, FrigateConfig
from frigate.config import FrigateConfig
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.models import Event
from frigate.util.builtin import to_relative_box
@@ -128,16 +128,13 @@ class EventProcessor(threading.Thread):
        if should_update_db(self.events_in_process[event_data["id"]], event_data):
            updated_db = True
            camera_config = self.config.cameras[camera]
            event_config: EventsConfig = camera_config.record.events
            width = camera_config.detect.width
            height = camera_config.detect.height
            first_detector = list(self.config.detectors.values())[0]

            start_time = event_data["start_time"] - event_config.pre_capture
            start_time = event_data["start_time"]
            end_time = (
                None
                if event_data["end_time"] is None
                else event_data["end_time"] + event_config.post_capture
                None if event_data["end_time"] is None else event_data["end_time"]
            )
            # score of the snapshot
            score = (
@@ -1070,25 +1070,27 @@ class TrackedObjectProcessor(threading.Thread):
        if obj.obj_data["position_changes"] == 0:
            return False

        # If there are required zones and there is no overlap
        # If the object is not considered an alert or detection
        review_config = self.config.cameras[camera].review
        required_zones = (
            review_config.alerts.required_zones
            + review_config.detections.required_zones
        )
        if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not enter required zones"
        if not (
            (
                obj.obj_data["label"] in review_config.alerts.labels
                and (
                    not review_config.alerts.required_zones
                    or set(obj.entered_zones) & set(review_config.alerts.required_zones)
                )
            )
            or (
                not review_config.detections.labels
                or obj.obj_data["label"] in review_config.detections.labels
            )
            and (
                not review_config.detections.required_zones
                or set(obj.entered_zones) & set(review_config.detections.required_zones)
            )
            return False

        # If the required objects are not present
        if (
            record_config.events.objects is not None
            and obj.obj_data["label"] not in record_config.events.objects
        ):
            logger.debug(
                f"Not creating clip for {obj.obj_data['id']} because it did not contain required objects"
                f"Not creating clip for {obj.obj_data['id']} because it did not qualify as an alert or detection"
            )
            return False
@@ -12,7 +12,7 @@ from playhouse.sqlite_ext import SqliteExtDatabase

from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
from frigate.models import Event, Previews, Recordings, ReviewSegment
from frigate.models import Previews, Recordings, ReviewSegment
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
@@ -61,8 +61,42 @@ class RecordingCleanup(threading.Thread):
        db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);")
        db.close()

    def expire_review_segments(self, config: CameraConfig, now: datetime) -> None:
        """Delete review segments that are expired"""
        alert_expire_date = (
            now - datetime.timedelta(days=config.record.alerts.retain.days)
        ).timestamp()
        detection_expire_date = (
            now - datetime.timedelta(days=config.record.detections.retain.days)
        ).timestamp()
        expired_reviews: ReviewSegment = (
            ReviewSegment.select(ReviewSegment.id)
            .where(
                (ReviewSegment.camera == config.name)
                & (
                    (
                        (ReviewSegment.severity == "alert")
                        & (ReviewSegment.end_time < alert_expire_date)
                    )
                    | (
                        (ReviewSegment.severity == "detection")
                        & (ReviewSegment.end_time < detection_expire_date)
                    )
                )
            )
            .namedtuples()
        )

        max_deletes = 100000
        deleted_reviews_list = list(map(lambda x: x[0], expired_reviews))
logger.info(f"the list is {deleted_reviews_list}")
|
||||
        for i in range(0, len(deleted_reviews_list), max_deletes):
            ReviewSegment.delete().where(
                ReviewSegment.id << deleted_reviews_list[i : i + max_deletes]
            ).execute()

    def expire_existing_camera_recordings(
        self, expire_date: float, config: CameraConfig, events: Event
        self, expire_date: float, config: CameraConfig, reviews: ReviewSegment
    ) -> None:
        """Delete recordings for existing camera based on retention config."""
        # Get the timestamp for cutoff of retained days
@@ -86,47 +120,47 @@ class RecordingCleanup(threading.Thread):
            .iterator()
        )

        # loop over recordings and see if they overlap with any non-expired events
        # loop over recordings and see if they overlap with any non-expired reviews
        # TODO: expire segments based on segment stats according to config
        event_start = 0
        review_start = 0
        deleted_recordings = set()
        kept_recordings: list[tuple[float, float]] = []
        for recording in recordings:
            keep = False
            mode = None
            # Now look for a reason to keep this recording segment
            for idx in range(event_start, len(events)):
                event: Event = events[idx]
            for idx in range(review_start, len(reviews)):
                review: ReviewSegment = reviews[idx]

                # if the event starts in the future, stop checking events
                # if the review starts in the future, stop checking reviews
                # and let this recording segment expire
                if event.start_time > recording.end_time:
                if review.start_time > recording.end_time:
                    keep = False
                    break

                # if the event is in progress or ends after the recording starts, keep it
                # and stop looking at events
                if event.end_time is None or event.end_time >= recording.start_time:
                # if the review is in progress or ends after the recording starts, keep it
                # and stop looking at reviews
                if review.end_time is None or review.end_time >= recording.start_time:
                    keep = True
                    mode = (
                        config.record.alerts.retain.mode
                        if review.severity == "alert"
                        else config.record.detections.retain.mode
                    )
                    break

                # if the event ends before this recording segment starts, skip
                # this event and check the next event for an overlap.
                # since the events and recordings are sorted, we can skip events
                # if the review ends before this recording segment starts, skip
                # this review and check the next review for an overlap.
                # since the reviews and recordings are sorted, we can skip reviews
                # that end before the previous recording segment started on future segments
                if event.end_time < recording.start_time:
                    event_start = idx
                if review.end_time < recording.start_time:
                    review_start = idx

            # Delete recordings outside of the retention window or based on the retention mode
            if (
                not keep
                or (
                    config.record.events.retain.mode == RetainModeEnum.motion
                    and recording.motion == 0
                )
                or (
                    config.record.events.retain.mode == RetainModeEnum.active_objects
                    and recording.objects == 0
                )
                or (mode == RetainModeEnum.motion and recording.motion == 0)
                or (mode == RetainModeEnum.active_objects and recording.objects == 0)
            ):
                Path(recording.path).unlink(missing_ok=True)
                deleted_recordings.add(recording.id)
@@ -202,65 +236,6 @@ class RecordingCleanup(threading.Thread):
                Previews.id << deleted_previews_list[i : i + max_deletes]
            ).execute()

        review_segments: list[ReviewSegment] = (
            ReviewSegment.select(
                ReviewSegment.id,
                ReviewSegment.start_time,
                ReviewSegment.end_time,
                ReviewSegment.thumb_path,
            )
            .where(
                ReviewSegment.camera == config.name,
                ReviewSegment.end_time < expire_date,
            )
            .order_by(ReviewSegment.start_time)
            .namedtuples()
            .iterator()
        )

        # expire review segments
        recording_start = 0
        deleted_segments = set()
        for segment in review_segments:
            keep = False
            # look for a reason to keep this segment
            for idx in range(recording_start, len(kept_recordings)):
                start_time, end_time = kept_recordings[idx]

                # if the recording starts in the future, stop checking recordings
                # and let this segment expire
                if start_time > segment.end_time:
                    keep = False
                    break

                # if the recording ends after the segment starts, keep it
                # and stop looking at recordings
                if end_time >= segment.start_time:
                    keep = True
                    break

                # if the recording ends before this segment starts, skip
                # this recording and check the next recording for an overlap.
                # since the kept recordings and segments are sorted, we can skip recordings
                # that end before the current segment started
                if end_time < segment.start_time:
                    recording_start = idx

            # Delete segments without any relevant recordings
            if not keep:
                Path(segment.thumb_path).unlink(missing_ok=True)
                deleted_segments.add(segment.id)

        # expire segments
        logger.debug(f"Expiring {len(deleted_segments)} segments")
        # delete up to 100,000 at a time
        max_deletes = 100000
        deleted_segments_list = list(deleted_segments)
        for i in range(0, len(deleted_segments_list), max_deletes):
            ReviewSegment.delete().where(
                ReviewSegment.id << deleted_segments_list[i : i + max_deletes]
            ).execute()

    def expire_recordings(self) -> None:
        """Delete recordings based on retention config."""
        logger.debug("Start expire recordings.")
@@ -302,30 +277,31 @@ class RecordingCleanup(threading.Thread):
        logger.debug("Start all cameras.")
        for camera, config in self.config.cameras.items():
            logger.debug(f"Start camera: {camera}.")
            now = datetime.datetime.now()

            self.expire_review_segments(config, now)

            expire_days = config.record.retain.days
            expire_date = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
            expire_date = (now - datetime.timedelta(days=expire_days)).timestamp()

            # Get all the events to check against
            events: Event = (
                Event.select(
                    Event.start_time,
                    Event.end_time,
            # Get all the reviews to check against
            reviews: ReviewSegment = (
                ReviewSegment.select(
                    ReviewSegment.start_time,
                    ReviewSegment.end_time,
                    ReviewSegment.severity,
                )
                .where(
                    Event.camera == camera,
                    # need to ensure segments for all events starting
                    ReviewSegment.camera == camera,
                    # need to ensure segments for all reviews starting
                    # before the expire date are included
                    Event.start_time < expire_date,
                    Event.has_clip,
                    ReviewSegment.start_time < expire_date,
                )
                .order_by(Event.start_time)
                .order_by(ReviewSegment.start_time)
                .namedtuples()
            )

            self.expire_existing_camera_recordings(expire_date, config, events)
            self.expire_existing_camera_recordings(expire_date, config, reviews)
            logger.debug(f"End camera: {camera}.")

        logger.debug("End all cameras.")
@@ -28,7 +28,7 @@ from frigate.const import (
    MAX_SEGMENTS_IN_CACHE,
    RECORD_DIR,
)
from frigate.models import Event, Recordings
from frigate.models import Recordings, ReviewSegment
from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)
@@ -159,25 +159,27 @@ class RecordingMaintainer(threading.Thread):
        ):
            self.audio_recordings_info[camera].pop(0)

        # get all events with the end time after the start of the oldest cache file
        # get all reviews with the end time after the start of the oldest cache file
        # or with end_time None
        events: Event = (
            Event.select(
                Event.start_time,
                Event.end_time,
                Event.data,
        reviews: ReviewSegment = (
            ReviewSegment.select(
                ReviewSegment.start_time,
                ReviewSegment.end_time,
                ReviewSegment.data,
            )
            .where(
                Event.camera == camera,
                (Event.end_time == None)
                | (Event.end_time >= recordings[0]["start_time"].timestamp()),
                Event.has_clip,
                ReviewSegment.camera == camera,
                (ReviewSegment.end_time == None)
                | (
                    ReviewSegment.end_time
                    >= recordings[0]["start_time"].timestamp()
                ),
            )
            .order_by(Event.start_time)
            .order_by(ReviewSegment.start_time)
        )

        tasks.extend(
            [self.validate_and_move_segment(camera, events, r) for r in recordings]
            [self.validate_and_move_segment(camera, reviews, r) for r in recordings]
        )

        recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
@@ -189,10 +191,11 @@ class RecordingMaintainer(threading.Thread):
        )

    async def validate_and_move_segment(
        self, camera: str, events: list[Event], recording: dict[str, any]
        self, camera: str, reviews: list[ReviewSegment], recording: dict[str, any]
    ) -> None:
        cache_path = recording["cache_path"]
        start_time = recording["start_time"]
        record_config = self.config.cameras[camera].record

        # Just delete files if recordings are turned off
        if (
@@ -232,10 +235,10 @@ class RecordingMaintainer(threading.Thread):
        ):
            # if the cached segment overlaps with the events:
            overlaps = False
            for event in events:
            for review in reviews:
                # if the event starts in the future, stop checking events
                # and remove this segment
                if event.start_time > end_time.timestamp():
                if review.start_time > end_time.timestamp():
                    overlaps = False
                    Path(cache_path).unlink(missing_ok=True)
                    self.end_time_cache.pop(cache_path, None)
@@ -243,12 +246,16 @@ class RecordingMaintainer(threading.Thread):

                # if the event is in progress or ends after the recording starts, keep it
                # and stop looking at events
                if event.end_time is None or event.end_time >= start_time.timestamp():
                if review.end_time is None or review.end_time >= start_time.timestamp():
                    overlaps = True
                    break

            if overlaps:
                record_mode = self.config.cameras[camera].record.events.retain.mode
                record_mode = (
                    record_config.alerts.retain.mode
                    if review.severity == "alert"
                    else record_config.detections.retain.mode
                )
                # move from cache to recordings immediately
                return await self.move_segment(
                    camera,
@@ -257,12 +264,14 @@ class RecordingMaintainer(threading.Thread):
                    duration,
                    cache_path,
                    record_mode,
                    event.data["type"] == "api",
                )
            # if it doesn't overlap with an event, go ahead and drop the segment
            # if it ends more than the configured pre_capture for the camera
            else:
                pre_capture = self.config.cameras[camera].record.events.pre_capture
                pre_capture = max(
                    record_config.alerts.pre_capture,
                    record_config.detections.pre_capture,
                )
                camera_info = self.object_recordings_info[camera]
                most_recently_processed_frame_time = (
                    camera_info[-1][0] if len(camera_info) > 0 else 0
@@ -349,12 +358,11 @@ class RecordingMaintainer(threading.Thread):
        duration: float,
        cache_path: str,
        store_mode: RetainModeEnum,
        manual_event: bool = False,  # if this segment is being moved due to a manual event
    ) -> Optional[Recordings]:
        segment_info = self.segment_stats(camera, start_time, end_time)

        # check if the segment shouldn't be stored
        if not manual_event and segment_info.should_discard_segment(store_mode):
        if segment_info.should_discard_segment(store_mode):
            Path(cache_path).unlink(missing_ok=True)
            self.end_time_cache.pop(cache_path, None)
            return
@@ -427,8 +435,7 @@ class RecordingMaintainer(threading.Thread):
            Recordings.duration.name: duration,
            Recordings.motion.name: segment_info.motion_count,
            # TODO: update this to store list of active objects at some point
            Recordings.objects.name: segment_info.active_object_count
            + (1 if manual_event else 0),
            Recordings.objects.name: segment_info.active_object_count,
            Recordings.regions.name: segment_info.region_count,
            Recordings.dBFS.name: segment_info.average_dBFS,
            Recordings.segment_size.name: segment_size,
@@ -11,7 +11,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
from setproctitle import setproctitle

from frigate.config import FrigateConfig
from frigate.models import Event, Recordings
from frigate.models import Recordings, ReviewSegment
from frigate.record.maintainer import RecordingMaintainer
from frigate.util.services import listen

@@ -41,7 +41,7 @@ def manage_recordings(config: FrigateConfig) -> None:
        },
        timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])),
    )
    models = [Event, Recordings]
    models = [ReviewSegment, Recordings]
    db.bind(models)

    maintainer = RecordingMaintainer(
@@ -381,9 +381,7 @@ class TestConfig(unittest.TestCase):
    def test_motion_mask_relative_matches_explicit(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
            },
            "record": {"alerts": {"retain": {"days": 20}}},
            "cameras": {
                "explicit": {
                    "ffmpeg": {
@@ -555,9 +553,7 @@ class TestConfig(unittest.TestCase):
    def test_inherit_clips_retention(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
            },
            "record": {"alerts": {"retain": {"days": 20}}},
            "cameras": {
                "back": {
                    "ffmpeg": {

@@ -577,15 +573,17 @@ class TestConfig(unittest.TestCase):
        assert config == frigate_config.model_dump(exclude_unset=True)

        runtime_config = frigate_config.runtime_config()
        assert (
            runtime_config.cameras["back"].record.events.retain.objects["person"] == 30
        )
        assert runtime_config.cameras["back"].record.alerts.retain.days == 20

    def test_roles_listed_twice_throws_error(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
                "alerts": {
                    "retain": {
                        "days": 20,
                    }
                }
            },
            "cameras": {
                "back": {
@@ -609,7 +607,11 @@ class TestConfig(unittest.TestCase):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
                "alerts": {
                    "retain": {
                        "days": 20,
                    }
                }
            },
            "cameras": {
                "back": {

@@ -633,7 +635,11 @@ class TestConfig(unittest.TestCase):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
                "alerts": {
                    "retain": {
                        "days": 20,
                    }
                }
            },
            "cameras": {
                "back": {

@@ -664,7 +670,11 @@ class TestConfig(unittest.TestCase):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
                "alerts": {
                    "retain": {
                        "days": 20,
                    }
                }
            },
            "cameras": {
                "back": {
@@ -695,37 +705,6 @@ class TestConfig(unittest.TestCase):
            frigate_config.cameras["back"].zones["relative"].contour,
        )

    def test_clips_should_default_to_global_objects(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "record": {
                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
            },
            "objects": {"track": ["person", "dog"]},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "detect": {
                        "height": 1080,
                        "width": 1920,
                        "fps": 5,
                    },
                    "record": {"events": {}},
                }
            },
        }
        frigate_config = FrigateConfig(**config)
        assert config == frigate_config.model_dump(exclude_unset=True)

        runtime_config = frigate_config.runtime_config()
        back_camera = runtime_config.cameras["back"]
        assert back_camera.record.events.objects is None
        assert back_camera.record.events.retain.objects["person"] == 30

    def test_role_assigned_but_not_enabled(self):
        config = {
            "mqtt": {"host": "mqtt"},
@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = 0.14
CURRENT_CONFIG_VERSION = "0.15-0"


def migrate_frigate_config(config_file: str):

@@ -29,7 +29,7 @@ def migrate_frigate_config(config_file: str):
    with open(config_file, "r") as f:
        config: dict[str, dict[str, any]] = yaml.load(f)

    previous_version = config.get("version", 0.13)
    previous_version = str(config.get("version", "0.13"))

    if previous_version == CURRENT_CONFIG_VERSION:
        logger.info("frigate config does not need migration...")

@@ -38,12 +38,12 @@ def migrate_frigate_config(config_file: str):
    logger.info("copying config as backup...")
    shutil.copy(config_file, os.path.join(CONFIG_DIR, "backup_config.yaml"))

    if previous_version < 0.14:
    if previous_version < "0.14":
        logger.info(f"Migrating frigate config from {previous_version} to 0.14...")
        new_config = migrate_014(config)
        with open(config_file, "w") as f:
            yaml.dump(new_config, f)
        previous_version = 0.14
        previous_version = "0.14"

        logger.info("Migrating export file names...")
        for file in os.listdir(EXPORT_DIR):

@@ -55,6 +55,13 @@ def migrate_frigate_config(config_file: str):
                os.path.join(EXPORT_DIR, file), os.path.join(EXPORT_DIR, new_name)
            )

    if previous_version < "0.15-0":
        logger.info(f"Migrating frigate config from {previous_version} to 0.15-0...")
        new_config = migrate_015_0(config)
        with open(config_file, "w") as f:
            yaml.dump(new_config, f)
        previous_version = "0.15-0"

    logger.info("Finished frigate config migration...")
@@ -141,7 +148,99 @@ def migrate_014(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:

        new_config["cameras"][name] = camera_config

    new_config["version"] = 0.14
    new_config["version"] = "0.14"
    return new_config


def migrate_015_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:
    """Handle migrating frigate config to 0.15-0"""
    new_config = config.copy()

    # migrate record.events to record.alerts and record.detections
    global_record_events = config.get("record", {}).get("events")
    if global_record_events:
        alerts_retention = {"retain": {}}
        detections_retention = {"retain": {}}

        if global_record_events.get("pre_capture"):
            alerts_retention["pre_capture"] = global_record_events["pre_capture"]

        if global_record_events.get("post_capture"):
            alerts_retention["post_capture"] = global_record_events["post_capture"]

        if global_record_events.get("retain", {}).get("default"):
            alerts_retention["retain"]["days"] = global_record_events["retain"][
                "default"
            ]

        # decide logical detections retention based on current detections config
        if not config.get("review", {}).get("alerts", {}).get(
            "required_zones"
        ) or config.get("review", {}).get("detections"):
            if global_record_events.get("pre_capture"):
                detections_retention["pre_capture"] = global_record_events[
                    "pre_capture"
                ]

            if global_record_events.get("post_capture"):
                detections_retention["post_capture"] = global_record_events[
                    "post_capture"
                ]

            if global_record_events.get("retain", {}).get("default"):
                detections_retention["retain"]["days"] = global_record_events["retain"][
                    "default"
                ]
        else:
            detections_retention["retain"]["days"] = 0

        new_config["record"]["alerts"] = alerts_retention
        new_config["record"]["detections"] = detections_retention

        del new_config["record"]["events"]

    for name, camera in config.get("cameras", {}).items():
        camera_config: dict[str, dict[str, any]] = camera.copy()

        record_events: dict[str, any] = camera_config.get("record", {}).get("events")

        if record_events:
            alerts_retention = {"retain": {}}
            detections_retention = {"retain": {}}

            if record_events.get("pre_capture"):
                alerts_retention["pre_capture"] = record_events["pre_capture"]

            if record_events.get("post_capture"):
                alerts_retention["post_capture"] = record_events["post_capture"]

            if record_events.get("retain", {}).get("default"):
                alerts_retention["retain"]["days"] = record_events["retain"]["default"]

            # decide logical detections retention based on current detections config
            if not camera_config.get("review", {}).get("alerts", {}).get(
                "required_zones"
            ) or camera_config.get("review", {}).get("detections"):
                if record_events.get("pre_capture"):
                    detections_retention["pre_capture"] = record_events["pre_capture"]

                if record_events.get("post_capture"):
                    detections_retention["post_capture"] = record_events["post_capture"]

                if record_events.get("retain", {}).get("default"):
                    detections_retention["retain"]["days"] = record_events["retain"][
                        "default"
                    ]
            else:
                detections_retention["retain"]["days"] = 0

            camera_config["record"]["alerts"] = alerts_retention
            camera_config["record"]["detections"] = detections_retention
            del camera_config["record"]["events"]

        new_config["cameras"][name] = camera_config

    new_config["version"] = "0.15-0"
    return new_config
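A minimal usage sketch of the new migration helper defined above. The sample config, the "yard" camera, and the import path are assumptions for illustration only; the assertions just restate what the function does (record.events becomes record.alerts and record.detections, with retain.default carried over as retain.days).

```python
# Illustrative only: exercising migrate_015_0 on a made-up config.
from frigate.util.config import migrate_015_0  # module path assumed

sample = {
    "record": {"events": {"pre_capture": 10, "retain": {"default": 14}}},
    "cameras": {"yard": {"record": {"events": {"retain": {"default": 7}}}}},
}

migrated = migrate_015_0(sample)
assert migrated["version"] == "0.15-0"
assert migrated["record"]["alerts"] == {"retain": {"days": 14}, "pre_capture": 10}
assert "events" not in migrated["record"]
assert migrated["cameras"]["yard"]["record"]["detections"]["retain"]["days"] == 7
```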