Compare commits


15 Commits

Author            SHA1        Message                                                             Date
Blake Blackshear  c1155af169  ensure cache copies when events have ended                          2021-11-21 09:43:37 -06:00
Blake Blackshear  77c1f1bb1b  cleanup missing files from database once per hour                   2021-11-21 07:55:35 -06:00
Blake Blackshear  ae3c01fe2d  handle missing file edge case                                       2021-11-21 07:26:31 -06:00
Blake Blackshear  7a2a85d253  log error messages on vod endpoints                                 2021-11-21 07:25:36 -06:00
Blake Blackshear  77c66d4e49  ensure duration > 0 for segments                                    2021-11-21 07:25:01 -06:00
Blake Blackshear  494e5ac4ec  use snapshot url to support in progress events                      2021-11-20 09:52:02 -06:00
Blake Blackshear  63b7465452  ensure stationary interval is greater than 0                       2021-11-20 09:15:03 -06:00
Blake Blackshear  e6d2df5661  add duration to cache                                               2021-11-19 16:56:00 -06:00
Blake Blackshear  a3301e0347  avoid running ffprobe for each segment multiple times               2021-11-19 07:28:51 -06:00
Blake Blackshear  3d556cc2cb  warn if no wait time                                                2021-11-19 07:19:14 -06:00
Blake Blackshear  585efe1a0f  keep 5 segments in cache                                            2021-11-19 07:16:29 -06:00
Blake Blackshear  c7d47439dd  better cache handling                                               2021-11-17 08:57:57 -06:00
Blake Blackshear  19a6978228  avoid proactive messages with retain_days 0 and handle first pass   2021-11-17 07:44:58 -06:00
Blake Blackshear  1ebb8a54bf  avoid divide by zero                                                2021-11-17 07:29:23 -06:00
Blake Blackshear  ae968044d6  revert switch to b/w frame prep                                     2021-11-17 07:28:53 -06:00
6 changed files with 99 additions and 58 deletions

View File

@@ -154,7 +154,8 @@ class DetectConfig(FrigateBaseModel):
         title="Maximum number of frames the object can dissapear before detection ends."
     )
     stationary_interval: Optional[int] = Field(
-        title="Frame interval for checking stationary objects."
+        title="Frame interval for checking stationary objects.",
+        ge=1,
     )
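
With `ge=1`, pydantic rejects a zero or negative interval at config-load time instead of letting it reach the detection loop (commit 63b7465452). A minimal sketch of that behavior, assuming a pydantic v1-style BaseModel rather than Frigate's actual FrigateBaseModel:

from typing import Optional
from pydantic import BaseModel, Field, ValidationError

class DetectConfig(BaseModel):  # stand-in for FrigateBaseModel
    stationary_interval: Optional[int] = Field(
        default=None,
        title="Frame interval for checking stationary objects.",
        ge=1,
    )

try:
    DetectConfig(stationary_interval=0)
except ValidationError as e:
    print(e)  # "ensure this value is greater than or equal to 1"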

View File

@@ -658,10 +658,15 @@ def vod_ts(camera, start_ts, end_ts):
         # Determine if we need to end the last clip early
         if recording.end_time > end_ts:
             duration -= int((recording.end_time - end_ts) * 1000)
-        clips.append(clip)
-        durations.append(duration)
+        if duration > 0:
+            clips.append(clip)
+            durations.append(duration)
+        else:
+            logger.warning(f"Recording clip is missing or empty: {recording.path}")

+    if not clips:
+        logger.error("No recordings found for the requested time range")
+        return "No recordings found.", 404

     hour_ago = datetime.now() - timedelta(hours=1)
@@ -690,10 +695,12 @@ def vod_event(id):
     try:
         event: Event = Event.get(Event.id == id)
     except DoesNotExist:
+        logger.error(f"Event not found: {id}")
         return "Event not found.", 404

     if not event.has_clip:
-        return "Clip not available", 404
+        logger.error(f"Event does not have recordings: {id}")
+        return "Recordings not available", 404

     clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
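
Taken together, the vod_ts changes drop zero-length segments instead of handing them to the player, and fail loudly when nothing remains. A standalone sketch of that guard, using hypothetical recording dicts rather than Frigate's peewee models:

import logging

logger = logging.getLogger(__name__)

def collect_clips(recordings, end_ts):
    """Build clip/duration lists, skipping missing or empty segments."""
    clips, durations = [], []
    for recording in recordings:
        duration = int(recording["duration"] * 1000)
        # end the last clip early if it runs past the requested range
        if recording["end_time"] > end_ts:
            duration -= int((recording["end_time"] - end_ts) * 1000)
        if duration > 0:
            clips.append({"type": "source", "path": recording["path"]})
            durations.append(duration)
        else:
            logger.warning("Recording clip is missing or empty: %s", recording["path"])
    return clips, durations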

View File

@@ -40,10 +40,12 @@ class MotionDetector:
         # Improve contrast
         minval = np.percentile(resized_frame, 4)
         maxval = np.percentile(resized_frame, 96)
-        resized_frame = np.clip(resized_frame, minval, maxval)
-        resized_frame = (((resized_frame - minval) / (maxval - minval)) * 255).astype(
-            np.uint8
-        )
+        # don't adjust if the image is a single color
+        if minval < maxval:
+            resized_frame = np.clip(resized_frame, minval, maxval)
+            resized_frame = (
+                ((resized_frame - minval) / (maxval - minval)) * 255
+            ).astype(np.uint8)

         # mask frame
         resized_frame[self.mask] = [255]
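
The guard exists because a single-color frame makes the 4th and 96th percentiles equal, so the old normalization divided by zero (commit 1ebb8a54bf). A quick standalone numpy demonstration:

import numpy as np

frame = np.full((10, 10), 128, dtype=np.uint8)  # flat, single-color frame
minval = np.percentile(frame, 4)
maxval = np.percentile(frame, 96)
assert minval == maxval  # (maxval - minval) == 0: the old code divided by zero here

if minval < maxval:  # the new guard: skip the contrast stretch for flat frames
    frame = np.clip(frame, minval, maxval)
    frame = (((frame - minval) / (maxval - minval)) * 255).astype(np.uint8)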

View File

@@ -45,6 +45,8 @@ class RecordingMaintainer(threading.Thread):
         self.name = "recording_maint"
         self.config = config
         self.stop_event = stop_event
+        self.first_pass = True
+        self.end_time_cache = {}

     def move_files(self):
         cache_files = [
@@ -87,19 +89,18 @@ class RecordingMaintainer(threading.Thread):
                 }
             )

-        # delete all cached files past the most recent 2
+        # delete all cached files past the most recent 5
+        keep_count = 5
         for camera in grouped_recordings.keys():
-            if len(grouped_recordings[camera]) > 2:
-                logger.warning(
-                    "Proactively cleaning cache. Your recordings disk may be too slow."
-                )
+            if len(grouped_recordings[camera]) > keep_count:
                 sorted_recordings = sorted(
                     grouped_recordings[camera], key=lambda i: i["start_time"]
                 )
-                to_remove = sorted_recordings[:-2]
+                to_remove = sorted_recordings[:-keep_count]
                 for f in to_remove:
                     Path(f["cache_path"]).unlink(missing_ok=True)
-                grouped_recordings[camera] = sorted_recordings[-2:]
+                    self.end_time_cache.pop(f["cache_path"], None)
+                grouped_recordings[camera] = sorted_recordings[-keep_count:]

         for camera, recordings in grouped_recordings.items():
             # get all events with the end time after the start of the oldest cache file
@@ -109,7 +110,7 @@ class RecordingMaintainer(threading.Thread):
                 .where(
                     Event.camera == camera,
                     (Event.end_time == None)
-                    | (Event.end_time >= recordings[0]["start_time"]),
+                    | (Event.end_time >= recordings[0]["start_time"].timestamp()),
                     Event.has_clip,
                 )
                 .order_by(Event.start_time)
@@ -124,26 +125,31 @@ class RecordingMaintainer(threading.Thread):
                     or not self.config.cameras[camera].record.enabled
                 ):
                     Path(cache_path).unlink(missing_ok=True)
+                    self.end_time_cache.pop(cache_path, None)
                     continue

-                ffprobe_cmd = [
-                    "ffprobe",
-                    "-v",
-                    "error",
-                    "-show_entries",
-                    "format=duration",
-                    "-of",
-                    "default=noprint_wrappers=1:nokey=1",
-                    f"{cache_path}",
-                ]
-                p = sp.run(ffprobe_cmd, capture_output=True)
-                if p.returncode == 0:
-                    duration = float(p.stdout.decode().strip())
-                    end_time = start_time + datetime.timedelta(seconds=duration)
+                if cache_path in self.end_time_cache:
+                    end_time, duration = self.end_time_cache[cache_path]
                 else:
-                    logger.warning(f"Discarding a corrupt recording segment: {f}")
-                    Path(cache_path).unlink(missing_ok=True)
-                    continue
+                    ffprobe_cmd = [
+                        "ffprobe",
+                        "-v",
+                        "error",
+                        "-show_entries",
+                        "format=duration",
+                        "-of",
+                        "default=noprint_wrappers=1:nokey=1",
+                        f"{cache_path}",
+                    ]
+                    p = sp.run(ffprobe_cmd, capture_output=True)
+                    if p.returncode == 0:
+                        duration = float(p.stdout.decode().strip())
+                        end_time = start_time + datetime.timedelta(seconds=duration)
+                        self.end_time_cache[cache_path] = (end_time, duration)
+                    else:
+                        logger.warning(f"Discarding a corrupt recording segment: {f}")
+                        Path(cache_path).unlink(missing_ok=True)
+                        continue

                 # if cached file's start_time is earlier than the retain_days for the camera
                 if start_time <= (
@@ -158,14 +164,17 @@ class RecordingMaintainer(threading.Thread):
                 overlaps = False
                 for event in events:
                     # if the event starts in the future, stop checking events
-                    # and let this recording segment expire
+                    # and remove this segment
                     if event.start_time > end_time.timestamp():
                         overlaps = False
                         break

                     # if the event is in progress or ends after the recording starts, keep it
                     # and stop looking at events
-                    if event.end_time is None or event.end_time >= start_time:
+                    if (
+                        event.end_time is None
+                        or event.end_time >= start_time.timestamp()
+                    ):
                         overlaps = True
                         break
@@ -218,6 +227,9 @@ class RecordingMaintainer(threading.Thread):
                 Path(cache_path).unlink(missing_ok=True)
                 logger.error(e)

+            # clear end_time cache
+            self.end_time_cache.pop(cache_path, None)
+
     def run(self):
         # Check for new files every 5 seconds
         wait_time = 5
@@ -230,7 +242,14 @@ class RecordingMaintainer(threading.Thread):
                     "Error occurred when attempting to maintain recording cache"
                 )
                 logger.error(e)
-            wait_time = max(0, 5 - (datetime.datetime.now().timestamp() - run_start))
+            duration = datetime.datetime.now().timestamp() - run_start
+            wait_time = max(0, 5 - duration)
+            if wait_time == 0 and not self.first_pass:
+                logger.warning(
+                    "Cache is taking longer than 5 seconds to clear. Your recordings disk may be too slow."
+                )
+            if self.first_pass:
+                self.first_pass = False

         logger.info(f"Exiting recording maintenance...")
@@ -389,11 +408,40 @@ class RecordingCleanup(threading.Thread):
         for f in files_to_check:
             p = Path(f)
-            if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
-                p.unlink(missing_ok=True)
+            try:
+                if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
+                    p.unlink(missing_ok=True)
+            except FileNotFoundError:
+                logger.warning(f"Attempted to expire missing file: {f}")

         logger.debug("End expire files (legacy).")

+    def sync_recordings(self):
+        logger.debug("Start sync recordings.")
+
+        # get all recordings in the db
+        recordings: Recordings = Recordings.select()
+
+        # get all recordings files on disk
+        process = sp.run(
+            ["find", RECORD_DIR, "-type", "f"],
+            capture_output=True,
+            text=True,
+        )
+        files_on_disk = process.stdout.splitlines()
+
+        recordings_to_delete = []
+        for recording in recordings.objects().iterator():
+            if not recording.path in files_on_disk:
+                recordings_to_delete.append(recording.id)
+
+        logger.debug(
+            f"Deleting {len(recordings_to_delete)} recordings with missing files"
+        )
+        Recordings.delete().where(Recordings.id << recordings_to_delete).execute()
+
+        logger.debug("End sync recordings.")
+
     def run(self):
         # Expire recordings every minute, clean directories every hour.
         for counter in itertools.cycle(range(60)):
@@ -407,3 +455,4 @@ class RecordingCleanup(threading.Thread):
             if counter == 0:
                 self.expire_files()
                 remove_empty_directories(RECORD_DIR)
+                self.sync_recordings()
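
The core pattern across these record.py hunks is memoizing ffprobe (commit a3301e0347): each cached segment is probed at most once, the (end_time, duration) pair is reused on later passes, and the entry is evicted whenever the file is deleted or moved. A condensed standalone sketch of that pattern, using a hypothetical free function rather than Frigate's class method:

import datetime
import subprocess as sp

end_time_cache = {}

def probe_segment(cache_path, start_time):
    """Return (end_time, duration) for a segment, probing it at most once."""
    if cache_path in end_time_cache:
        return end_time_cache[cache_path]
    p = sp.run(
        ["ffprobe", "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", cache_path],
        capture_output=True,
    )
    if p.returncode != 0:
        return None  # corrupt segment; the caller discards the file
    duration = float(p.stdout.decode().strip())
    end_time = start_time + datetime.timedelta(seconds=duration)
    end_time_cache[cache_path] = (end_time, duration)  # reuse on later passes
    return end_time, duration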

View File

@@ -75,25 +75,7 @@ def filtered(obj, objects_to_track, object_filters):
 def create_tensor_input(frame, model_shape, region):
-    # TODO: is it faster to just convert grayscale to RGB? or repeat dimensions with numpy?
-    height = frame.shape[0] // 3 * 2
-    width = frame.shape[1]
-    # get the crop box if the region extends beyond the frame
-    crop_x1 = max(0, region[0])
-    crop_y1 = max(0, region[1])
-    crop_x2 = min(width, region[2])
-    crop_y2 = min(height, region[3])
-    size = region[3] - region[1]
-    cropped_frame = np.zeros((size, size), np.uint8)
-    cropped_frame[
-        0 : crop_y2 - crop_y1,
-        0 : crop_x2 - crop_x1,
-    ] = frame[crop_y1:crop_y2, crop_x1:crop_x2]
-    cropped_frame = np.repeat(np.expand_dims(cropped_frame, -1), 3, 2)
+    cropped_frame = yuv_region_2_rgb(frame, region)

     # Resize to 300x300 if needed
     if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
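
For context on the revert (commit ae968044d6): the removed branch fed the detector a grayscale crop duplicated across three channels, while yuv_region_2_rgb converts the YUV region to true RGB. A small numpy illustration of the repeat trick that was dropped (shapes are hypothetical):

import numpy as np

gray_crop = np.zeros((300, 300), dtype=np.uint8)  # single-channel crop
stacked = np.repeat(np.expand_dims(gray_crop, -1), 3, 2)  # duplicate into 3 channels
assert stacked.shape == (300, 300, 3)  # identical R, G, and B planes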

View File

@@ -199,7 +199,7 @@ export default function Event({ eventId, close, scrollRef }) {
           <img
             src={
               data.has_snapshot
-                ? `${apiHost}/clips/${data.camera}-${eventId}.jpg`
+                ? `${apiHost}/api/events/${eventId}/snapshot.jpg`
                 : `data:image/jpeg;base64,${data.thumbnail}`
             }
             alt={`${data.label} at ${(data.top_score * 100).toFixed(1)}% confidence`}
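
Unlike the static /clips/ file, the snapshot endpoint can serve an image while the event is still in progress (commit 494e5ac4ec). A quick smoke test against a running instance; the host, port, and event id below are placeholders, not values from this change set:

import requests

api_host = "http://localhost:5000"      # assumption: local Frigate instance
event_id = "1637441337.123456-abc123"   # placeholder event id

resp = requests.get(f"{api_host}/api/events/{event_id}/snapshot.jpg")
print(resp.status_code)  # expect 200 with a JPEG body for a known event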