Compare commits


34 Commits

Author SHA1 Message Date
Blake Blackshear  8ea0eeda06  update config example  2020-01-15 07:28:12 -06:00
Blake Blackshear  94878315ae  remove region in process when skipping  2020-01-14 20:39:42 -06:00
Blake Blackshear  8dab9e17dd  switch to opencv headless  2020-01-14 20:39:07 -06:00
Blake Blackshear  5b2470e91e  add camera name to ffmpeg log messages  2020-01-14 20:38:55 -06:00
Blake Blackshear  3d5faa956c  skip regions when the queue is too full and add more locks  2020-01-14 07:00:53 -06:00
Blake Blackshear  b615b84f57  switch back to stretch for hwaccel issues  2020-01-12 12:48:43 -06:00
Blake Blackshear  6f7b70665b  check correct object  2020-01-12 07:51:49 -06:00
Blake Blackshear  3a5cb465fe  cleanup  2020-01-12 07:50:43 -06:00
Blake Blackshear  205b8b413f  add a label position arg for bounding boxes  2020-01-12 07:50:21 -06:00
Blake Blackshear  1b74d7a19f  let the queues get as big as needed  2020-01-12 07:49:52 -06:00
Blake Blackshear  b18e8ca468  notify mqtt when objects deregistered  2020-01-12 07:14:42 -06:00
Blake Blackshear  9ebe186443  fix multiple object type tracking  2020-01-11 13:22:56 -06:00
Blake Blackshear  e580aca440  switch everything to run off of tracked objects  2020-01-09 20:53:04 -06:00
Blake Blackshear  191f293037  group by label before tracking objects  2020-01-09 06:52:28 -06:00
Blake Blackshear  d31ba69b1b  fix mask filtering  2020-01-09 06:50:53 -06:00
Blake Blackshear  02e1035826  make a copy  2020-01-09 06:49:39 -06:00
Blake Blackshear  3d419a39a8  fix object filters  2020-01-08 06:40:40 -06:00
Blake Blackshear  474a3e604d  group by label before suppressing boxes  2020-01-07 20:44:00 -06:00
Blake Blackshear  fc757ad04f  update all obj props  2020-01-07 20:43:25 -06:00
Blake Blackshear  2a86d3e2e8  add thread to write frames to disk  2020-01-06 20:36:38 -06:00
Blake Blackshear  3e374ceb5f  merge boxes by label  2020-01-06 20:36:04 -06:00
Blake Blackshear  0b8f2cadf3  fix color of best image  2020-01-06 20:34:53 -06:00
Blake Blackshear  42f666491a  remove unused current frame variable  2020-01-06 07:38:37 -06:00
Blake Blackshear  35771b3444  removing pillow-simd for now  2020-01-06 06:48:11 -06:00
Blake Blackshear  2010ae8f87  revamp dockerfile  2020-01-05 17:43:14 -06:00
Blake Blackshear  fb0f6bcfae  track objects and add config for tracked objects  2020-01-04 18:13:53 -06:00
Blake Blackshear  7b1da388d9  implement filtering and switch to NMS with OpenCV  2020-01-04 12:02:06 -06:00
Blake Blackshear  5d0c12fbd4  cleanup imports  2020-01-04 12:00:29 -06:00
Blake Blackshear  a43fd96349  fixing a few things  2020-01-02 07:43:46 -06:00
Blake Blackshear  bf94fdc54d  dedupe detected objects  2020-01-02 07:43:46 -06:00
Blake Blackshear  48b3f22866  working dynamic regions, but messy  2020-01-02 07:43:46 -06:00
Blake Blackshear  36443980ea  process detected objects in a queue  2020-01-02 07:43:46 -06:00
Blake Blackshear  0f8f8fa3b3  label threads and implements stats endpoint  2020-01-02 07:43:46 -06:00
Blake Blackshear  d8a3f8fc9d  refactor resizing into generic priority queues  2020-01-02 07:43:46 -06:00
4 changed files with 13 additions and 73 deletions
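Several of the commits listed above ("merge boxes by label", "group by label before suppressing boxes", "implement filtering and switch to NMS with OpenCV") describe the same idea: run non-maximum suppression per label rather than across all labels, using OpenCV's NMS helper. A minimal sketch of that pattern, assuming detections are plain dicts with 'name', 'box' and 'score' keys (illustrative names, not necessarily the project's actual structures):

import cv2
import numpy as np
from collections import defaultdict

def suppress_by_label(detections, score_threshold=0.5, nms_threshold=0.4):
    # group detections by label so a 'person' box never suppresses a 'car' box
    grouped = defaultdict(list)
    for d in detections:
        grouped[d['name']].append(d)

    kept = []
    for label, group in grouped.items():
        # cv2.dnn.NMSBoxes expects boxes as [x, y, w, h]
        boxes = [[d['box']['xmin'],
                  d['box']['ymin'],
                  d['box']['xmax'] - d['box']['xmin'],
                  d['box']['ymax'] - d['box']['ymin']] for d in group]
        scores = [d['score'] for d in group]
        idxs = cv2.dnn.NMSBoxes(boxes, scores, score_threshold, nms_threshold)
        for i in np.array(idxs).flatten():
            kept.append(group[int(i)])
    return kept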

View File

@@ -1,4 +1,4 @@
FROM ubuntu:18.04
FROM debian:stretch-slim
LABEL maintainer "blakeb@blakeshome.com"
ENV DEBIAN_FRONTEND=noninteractive
@@ -16,7 +16,7 @@ RUN apt -qq update && apt -qq install --no-install-recommends -y \
# pillow-simd
# zlib1g-dev libjpeg-dev \
# VAAPI drivers for Intel hardware accel
libva-drm2 libva2 i965-va-driver vainfo \
i965-va-driver vainfo \
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
&& wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& apt -qq update \

View File

@@ -81,12 +81,6 @@ cameras:
# hwaccel_args: []
# input_args: []
# output_args: []
################
## Optionally specify the resolution of the video feed. Frigate will try to auto detect if not specified
################
# height: 1280
# width: 720
################
## Optional mask. Must be the same dimensions as your video feed.
@@ -106,19 +100,7 @@ cameras:
take_frame: 1
################
# The number of seconds frigate will allow a camera to go without sending a frame before
# assuming the ffmpeg process has a problem and restarting.
################
# watchdog_timeout: 300
################
# Configuration for the snapshot sent over mqtt
################
snapshots:
show_timestamp: True
################
# Camera level object config. This config is merged with the global config above.
# Overrides for global object config
################
objects:
track:
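The camera-level 'objects' block above is documented as being merged with the global object config. A minimal sketch of that kind of merge, assuming both levels are plain dicts loaded from the YAML (the helper name and exact precedence rules are illustrative, not taken from the project):

def merge_objects_config(global_objects, camera_objects):
    # start from the global settings and let camera-level values win
    merged = dict(global_objects)
    for key, value in camera_objects.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            # merge nested sections (e.g. per-label filters) one level deep
            merged[key] = {**merged[key], **value}
        else:
            merged[key] = value
    return merged

# example (hypothetical values):
#   global: {'track': ['person'], 'filters': {'person': {'min_area': 5000}}}
#   camera: {'track': ['person', 'car']}
#   result: {'track': ['person', 'car'], 'filters': {'person': {'min_area': 5000}}}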

View File

@@ -88,30 +88,21 @@ class DetectedObjectsProcessor(threading.Thread):
obj['clipped'] = True
# Compute the area
# TODO: +1 right?
obj['area'] = (obj['box']['xmax']-obj['box']['xmin'])*(obj['box']['ymax']-obj['box']['ymin'])
self.camera.detected_objects[frame['frame_time']].append(obj)
# TODO: use in_process and processed counts instead to avoid lock
with self.camera.regions_in_process_lock:
if frame['frame_time'] in self.camera.regions_in_process:
self.camera.regions_in_process[frame['frame_time']] -= 1
self.camera.regions_in_process[frame['frame_time']] -= 1
# print(f"{frame['frame_time']} remaining regions {self.camera.regions_in_process[frame['frame_time']]}")
if self.camera.regions_in_process[frame['frame_time']] == 0:
del self.camera.regions_in_process[frame['frame_time']]
# print(f"{frame['frame_time']} no remaining regions")
self.camera.finished_frame_queue.put(frame['frame_time'])
else:
if self.camera.regions_in_process[frame['frame_time']] == 0:
del self.camera.regions_in_process[frame['frame_time']]
# print(f"{frame['frame_time']} no remaining regions")
self.camera.finished_frame_queue.put(frame['frame_time'])
# Thread that checks finished frames for clipped objects and sends back
# for processing if needed
# TODO: evaluate whether or not i really need separate threads/queues for each step
# given that only 1 thread will really be able to run at a time. you need a
# separate process to actually do things in parallel for when you are CPU bound.
# threads are good when you are waiting and could be processing while you wait
class RegionRefiner(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
@@ -369,9 +360,6 @@ class ObjectTracker(threading.Thread):
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
# if D.shape[0] < D.shape[1]:
# TODO: rather than assuming these are new objects, we could
# look to see if any of the remaining boxes have a large amount
# of overlap...
for col in unusedCols:
self.register(col, group[col])
@@ -411,8 +399,7 @@ class BestFrames(threading.Thread):
obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']))
# print a timestamp
if self.camera.snapshot_config['show_timestamp']:
time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
self.best_frames[name] = best_frame
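The first hunk above is bookkeeping for how many regions are still in flight for a given frame, now guarded by a lock ("skip regions when the queue is too full and add more locks", "remove region in process when skipping"): the count for a frame_time is decremented as region results arrive, and when it reaches zero the frame moves to the finished queue. A stripped-down sketch of that counting pattern, with module-level names standing in for the camera object's attributes:

import threading
import queue

regions_in_process = {}          # frame_time -> number of regions still being detected
regions_in_process_lock = threading.Lock()
finished_frame_queue = queue.Queue()

def region_finished(frame_time):
    # called once per region result; when the count for a frame reaches zero,
    # the frame is complete and can be handed to the next stage
    with regions_in_process_lock:
        if frame_time in regions_in_process:
            regions_in_process[frame_time] -= 1
            if regions_in_process[frame_time] == 0:
                del regions_in_process[frame_time]
                finished_frame_queue.put(frame_time)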

View File

@@ -11,7 +11,6 @@ import numpy as np
import prctl
import copy
import itertools
import json
from collections import defaultdict
from frigate.util import tonumpyarray, LABELS, draw_box_with_label, calculate_region, EventsPerSecond
from frigate.object_detection import RegionPrepper, RegionRequester
@@ -43,29 +42,8 @@ class FrameTracker(threading.Thread):
del self.recent_frames[k]
def get_frame_shape(source):
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'panic',
'-show_error',
'-show_streams',
'-of',
'json',
'"'+source+'"'
])
print(ffprobe_cmd)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
info = json.loads(output)
print(info)
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
if video_info['height'] != 0 and video_info['width'] != 0:
return (video_info['height'], video_info['width'], 3)
# fallback to using opencv if ffprobe didnt succeed
# capture a single frame and check the frame shape so the correct array
# size can be allocated in memory
video = cv2.VideoCapture(source)
ret, frame = video.read()
frame_shape = frame.shape
@@ -87,7 +65,7 @@ class CameraWatchdog(threading.Thread):
# wait a bit before checking
time.sleep(10)
if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > self.camera.watchdog_timeout:
if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > 300:
print(self.camera.name + ": last frame is more than 5 minutes old, restarting camera capture...")
self.camera.start_or_restart_capture()
time.sleep(5)
@@ -173,15 +151,8 @@ class Camera:
camera_objects_config = config.get('objects', {})
self.take_frame = self.config.get('take_frame', 1)
self.watchdog_timeout = self.config.get('watchdog_timeout', 300)
self.snapshot_config = {
'show_timestamp': self.config.get('snapshots', {}).get('show_timestamp', True)
}
self.regions = self.config['regions']
if 'width' in self.config and 'height' in self.config:
self.frame_shape = (self.config['height'], self.config['width'], 3)
else:
self.frame_shape = get_frame_shape(self.ffmpeg_input)
self.frame_shape = get_frame_shape(self.ffmpeg_input)
self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
self.mqtt_client = mqtt_client
self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
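Both variants of get_frame_shape appear in the hunk above: one shells out to ffprobe and parses its JSON stream info, the other reads a single frame with OpenCV and uses its shape to size the shared memory array. A consolidated sketch chaining the two approaches, for illustration only and not the repository's exact code:

import json
import subprocess as sp
import cv2

def get_frame_shape(source):
    # try ffprobe first so no frame has to be decoded just to learn the resolution
    try:
        output = sp.check_output(
            ['ffprobe', '-v', 'panic', '-show_error', '-show_streams', '-of', 'json', source])
        info = json.loads(output)
        video_info = [s for s in info.get('streams', []) if s.get('codec_type') == 'video'][0]
        if video_info.get('height') and video_info.get('width'):
            return (video_info['height'], video_info['width'], 3)
    except (sp.CalledProcessError, IndexError, ValueError):
        pass
    # fallback: capture a single frame with OpenCV and use its shape
    video = cv2.VideoCapture(source)
    ret, frame = video.read()
    video.release()
    return frame.shape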