db improvements (#7227)

* Store camera labels in dict and other optimizations

* Add max on timeout so it is at least 60

* Ensure db timeout is at least 60

* Update list once a day to ensure new labels are cleaned up

* Formatting

* Insert recordings in bulk instead of individually.

* Fix

* Refactor event and timeline cleanup

* Remove unused
Author: Nicolas Mowen
Date: 2023-07-21 06:29:50 -06:00
Committed by: GitHub
Parent: b655eca152
Commit: bfa7a5cc60

6 changed files with 120 additions and 78 deletions

frigate/record/cleanup.py

@@ -12,7 +12,7 @@ from peewee import DatabaseError, chunked
 from frigate.config import FrigateConfig, RetainModeEnum
 from frigate.const import RECORD_DIR
-from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
+from frigate.models import Event, Recordings, RecordingsToDelete
 from frigate.record.util import remove_empty_directories

 logger = logging.getLogger(__name__)
@@ -140,15 +140,6 @@ class RecordingCleanup(threading.Thread):
                     Path(recording.path).unlink(missing_ok=True)
                     deleted_recordings.add(recording.id)

-                    # delete timeline entries relevant to this recording segment
-                    Timeline.delete().where(
-                        Timeline.timestamp.between(
-                            recording.start_time, recording.end_time
-                        ),
-                        Timeline.timestamp < expire_date,
-                        Timeline.camera == camera,
-                    ).execute()
-
             logger.debug(f"Expiring {len(deleted_recordings)} recordings")
             # delete up to 100,000 at a time
             max_deletes = 100000
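This hunk drops the per-segment Timeline.delete(); per the commit message ("Refactor event and timeline cleanup"), timeline expiry now happens in the consolidated event/timeline cleanup instead. The payoff is query volume: one batched delete per camera per pass rather than one per expired segment. A hedged sketch of that batched shape (helper name hypothetical, not the commit's actual code):

from frigate.models import Timeline

def expire_timeline(camera: str, expire_date: float) -> int:
    # Hypothetical consolidated form: one DELETE per camera per cleanup pass,
    # replacing the per-segment delete removed above. Returns rows deleted.
    return (
        Timeline.delete()
        .where(Timeline.camera == camera, Timeline.timestamp < expire_date)
        .execute()
    )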

frigate/record/maintainer.py

@@ -125,6 +125,7 @@ class RecordingMaintainer(threading.Thread):
                     self.end_time_cache.pop(cache_path, None)
                 grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]

+        tasks = []
         for camera, recordings in grouped_recordings.items():
             # clear out all the object recording info for old frames
             while (
@@ -155,10 +156,15 @@ class RecordingMaintainer(threading.Thread):
                 .order_by(Event.start_time)
             )

-            await asyncio.gather(
-                *(self.validate_and_move_segment(camera, events, r) for r in recordings)
-            )
+            tasks.extend(
+                [self.validate_and_move_segment(camera, events, r) for r in recordings]
+            )
+
+        recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)
+        Recordings.insert_many(
+            [r for r in recordings_to_insert if r is not None]
+        ).execute()

     async def validate_and_move_segment(
         self, camera: str, events: Event, recording: dict[str, any]
     ) -> None:
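The reworked flow queues every segment's validation as a task, resolves them all with a single asyncio.gather, and writes the surviving rows in one insert_many instead of issuing a Recordings.create per segment. A minimal, self-contained sketch of that pattern with a stand-in model (names here are illustrative, not Frigate's):

import asyncio
from typing import Optional

from peewee import CharField, FloatField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class Recording(Model):
    # Stand-in for frigate.models.Recordings
    id = CharField(primary_key=True)
    camera = CharField()
    duration = FloatField()

    class Meta:
        database = db

async def validate_and_move(i: int) -> Optional[dict]:
    # Stand-in for validate_and_move_segment: a row dict, or None to skip.
    if i % 2:
        return None
    return {Recording.id: f"seg-{i}", Recording.camera: "front", Recording.duration: 10.0}

async def main() -> None:
    db.create_tables([Recording])
    rows = await asyncio.gather(*(validate_and_move(i) for i in range(6)))
    # One bulk INSERT for the whole batch; peewee accepts row dicts keyed by
    # Field objects, which is exactly what move_segment now returns.
    Recording.insert_many([r for r in rows if r is not None]).execute()
    print(Recording.select().count())  # -> 3

asyncio.run(main())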
@@ -225,7 +231,7 @@ class RecordingMaintainer(threading.Thread):
         if overlaps:
             record_mode = self.config.cameras[camera].record.events.retain.mode
             # move from cache to recordings immediately
-            self.store_segment(
+            return self.move_segment(
                 camera,
                 start_time,
                 end_time,
@@ -247,7 +253,7 @@ class RecordingMaintainer(threading.Thread):
         # else retain days includes this segment
         else:
             record_mode = self.config.cameras[camera].record.retain.mode
-            self.store_segment(
+            return self.move_segment(
                 camera, start_time, end_time, duration, cache_path, record_mode
             )
@@ -290,7 +296,7 @@ class RecordingMaintainer(threading.Thread):
         return SegmentInfo(motion_count, active_count, round(average_dBFS))

-    def store_segment(
+    def move_segment(
         self,
         camera: str,
         start_time: datetime.datetime,
@@ -298,7 +304,7 @@ class RecordingMaintainer(threading.Thread):
         duration: float,
         cache_path: str,
         store_mode: RetainModeEnum,
-    ) -> None:
+    ) -> Optional[Recordings]:
         segment_info = self.segment_stats(camera, start_time, end_time)

         # check if the segment shouldn't be stored
@@ -348,7 +354,7 @@ class RecordingMaintainer(threading.Thread):
             if p.returncode != 0:
                 logger.error(f"Unable to convert {cache_path} to {file_path}")
                 logger.error(p.stderr)
-                return
+                return None
             else:
                 logger.debug(
                     f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
@@ -368,19 +374,20 @@ class RecordingMaintainer(threading.Thread):
             rand_id = "".join(
                 random.choices(string.ascii_lowercase + string.digits, k=6)
             )
-            Recordings.create(
-                id=f"{start_time.timestamp()}-{rand_id}",
-                camera=camera,
-                path=file_path,
-                start_time=start_time.timestamp(),
-                end_time=end_time.timestamp(),
-                duration=duration,
-                motion=segment_info.motion_box_count,
+            return {
+                Recordings.id: f"{start_time.timestamp()}-{rand_id}",
+                Recordings.camera: camera,
+                Recordings.path: file_path,
+                Recordings.start_time: start_time.timestamp(),
+                Recordings.end_time: end_time.timestamp(),
+                Recordings.duration: duration,
+                Recordings.motion: segment_info.motion_box_count,
                 # TODO: update this to store list of active objects at some point
-                objects=segment_info.active_object_count,
-                dBFS=segment_info.average_dBFS,
-                segment_size=segment_size,
-            )
+                Recordings.objects: segment_info.active_object_count,
+                Recordings.dBFS: segment_info.average_dBFS,
+                Recordings.segment_size: segment_size,
+            }
         except Exception as e:
             logger.error(f"Unable to store recording segment {cache_path}")
             Path(cache_path).unlink(missing_ok=True)
@@ -388,10 +395,11 @@ class RecordingMaintainer(threading.Thread):
         # clear end_time cache
         self.end_time_cache.pop(cache_path, None)
+        return None

     def run(self) -> None:
         # Check for new files every 5 seconds
-        wait_time = 5.0
+        wait_time = 0.0
         while not self.stop_event.wait(wait_time):
             run_start = datetime.datetime.now().timestamp()
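wait_time now starts at 0.0, so the first maintenance pass runs immediately instead of idling five seconds at startup. The run_start context line suggests later waits are derived from each pass's elapsed time; that recompute sits outside this hunk, so the sketch below assumes it:

import threading
import time

def run(stop_event: threading.Event, period: float = 5.0) -> None:
    wait_time = 0.0  # first pass starts immediately
    while not stop_event.wait(wait_time):
        run_start = time.time()
        # ... one maintenance pass over cached segments ...
        # assumed: subtract elapsed time so each cycle still targets `period`
        wait_time = max(0.0, period - (time.time() - run_start))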

frigate/record/record.py

@@ -45,7 +45,7 @@ def manage_recordings(
             "cache_size": -512 * 1000,  # 512MB of cache
             "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
         },
-        timeout=10 * len([c for c in config.cameras.values() if c.enabled]),
+        timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])),
     )
     models = [Event, Recordings, Timeline, RecordingsToDelete]
     db.bind(models)
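The old expression scaled the SQLite busy timeout with the number of enabled cameras, which yields 0 with no cameras enabled and only 30 with three; wrapping it in max(60, ...) enforces the 60-second floor the commit message calls for. The arithmetic as a standalone check (helper name hypothetical):

def db_timeout(enabled_cameras: int) -> int:
    # max(60, ...) keeps small camera counts from producing a tiny timeout
    return max(60, 10 * enabled_cameras)

assert db_timeout(0) == 60    # previously 0
assert db_timeout(3) == 60    # previously 30
assert db_timeout(10) == 100  # scaling unchanged beyond six cameras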