forked from Github/frigate

Comparing v0.5.0-rc1...v0.5.0-rc3 (8 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7686c510b3 | |
| | 2f5e322d3c | |
| | 1cd4c12104 | |
| | 1a8b034685 | |
| | da6dc03a57 | |
| | 7fa3b70d2d | |
| | 1fc5a2bfd4 | |
| | 7e84da7dad | |
```diff
@@ -66,11 +66,12 @@ class CameraWatchdog(threading.Thread):
         time.sleep(10)
         while True:
             # wait a bit before checking
-            time.sleep(10)
+            time.sleep(30)
 
             for name, camera_process in self.camera_processes.items():
                 process = camera_process['process']
-                if (datetime.datetime.now().timestamp() - self.object_processor.get_current_frame_time(name)) > 30:
+                if (not self.object_processor.get_current_frame_time(name) is None and
+                    (datetime.datetime.now().timestamp() - self.object_processor.get_current_frame_time(name)) > 30):
                     print(f"Last frame for {name} is more than 30 seconds old...")
                     if process.is_alive():
                         process.terminate()
```
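The hunk above makes the watchdog skip the staleness check until a camera has produced at least one frame, instead of comparing against a `None` timestamp. A minimal sketch of that pattern; `get_last_frame_time` and `restart` are hypothetical stand-ins for the real watchdog plumbing:

```python
import datetime
import time

STALE_AFTER_SECONDS = 30

def watch(cameras, get_last_frame_time, restart):
    """Periodically restart cameras whose most recent frame is too old."""
    while True:
        time.sleep(STALE_AFTER_SECONDS)
        now = datetime.datetime.now().timestamp()
        for name in cameras:
            last = get_last_frame_time(name)
            if last is None:
                continue  # no frame yet, nothing to compare against
            if now - last > STALE_AFTER_SECONDS:
                print(f"Last frame for {name} is more than {STALE_AFTER_SECONDS} seconds old...")
                restart(name)
```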
```diff
@@ -84,6 +85,8 @@ class CameraWatchdog(threading.Thread):
                     print(f"Process for {name} is not alive. Starting again...")
                     camera_process['fps'].value = float(self.config[name]['fps'])
                     camera_process['skipped_fps'].value = 0.0
+                    camera_process['detection_fps'].value = 0.0
+                    self.object_processor.camera_data[name]['current_frame_time'] = None
                     process = mp.Process(target=track_camera, args=(name, self.config[name], FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
                         self.tflite_process.detect_lock, self.tflite_process.detect_ready, self.tflite_process.frame_ready, self.tracked_objects_queue,
                         camera_process['fps'], camera_process['skipped_fps'], camera_process['detection_fps']))
```
```diff
@@ -117,7 +120,7 @@ def main():
 
     # start plasma store
     plasma_cmd = ['plasma_store', '-m', '400000000', '-s', '/tmp/plasma']
-    plasma_process = sp.Popen(plasma_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
+    plasma_process = sp.Popen(plasma_cmd, stdout=sp.DEVNULL)
    time.sleep(1)
     rc = plasma_process.poll()
     if rc is not None:
```
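This hunk stops silencing the plasma store's stderr, so startup failures show up in the logs. A small sketch of the launch-and-verify pattern, assuming the `plasma_store` binary shipped with pyarrow is on the PATH (`-m` is the shared-memory size in bytes, `-s` the socket path); the helper name is illustrative:

```python
import subprocess as sp
import time

def start_plasma_store(memory_bytes=400_000_000, socket="/tmp/plasma"):
    # keep stderr attached so failures are visible in the logs
    cmd = ['plasma_store', '-m', str(memory_bytes), '-s', socket]
    proc = sp.Popen(cmd, stdout=sp.DEVNULL)
    time.sleep(1)
    if proc.poll() is not None:
        raise RuntimeError(f"plasma_store exited with code {proc.returncode}")
    return proc
```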
```diff
@@ -144,8 +147,7 @@ def main():
         camera_processes[name] = {
             'fps': mp.Value('d', float(config['fps'])),
             'skipped_fps': mp.Value('d', 0.0),
-            'detection_fps': mp.Value('d', 0.0),
-            'last_frame': datetime.datetime.now().timestamp()
+            'detection_fps': mp.Value('d', 0.0)
         }
         camera_process = mp.Process(target=track_camera, args=(name, config, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
             tflite_process.detect_lock, tflite_process.detect_ready, tflite_process.frame_ready, tracked_objects_queue,
```
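The per-camera entries above are `multiprocessing.Value('d', ...)` objects, so the camera process and the main process share the same double-precision counters. A self-contained sketch of that mechanism; the `worker` function here is illustrative, not frigate's:

```python
import multiprocessing as mp
import time

def worker(fps):
    # the child updates the shared double in place
    for i in range(5):
        with fps.get_lock():
            fps.value = float(i)
        time.sleep(0.1)

if __name__ == '__main__':
    fps = mp.Value('d', 0.0)  # 'd' -> C double
    p = mp.Process(target=worker, args=(fps,))
    p.start()
    p.join()
    print(f"final fps value: {fps.value}")  # parent sees the child's last write
```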
```diff
@@ -192,6 +194,11 @@ def main():
             'inference_speed': round(tflite_process.avg_inference_speed.value*1000, 2)
         }
 
+        rc = plasma_process.poll()
+        stats['plasma_store_rc'] = rc
+
+        stats['tracked_objects_queue'] = tracked_objects_queue.qsize()
+
         return jsonify(stats)
 
     @app.route('/<camera_name>/<label>/best.jpg')
```
```diff
@@ -34,7 +34,9 @@ class TrackedObjectProcessor(threading.Thread):
             'best_objects': {},
             'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
             'tracked_objects': {},
-            'current_frame_time': datetime.datetime.now().timestamp()
+            'current_frame_time': None,
+            'current_frame': np.zeros((720,1280,3), np.uint8),
+            'object_id': None
         })
 
     def get_best(self, camera, label):
```
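`camera_data` is a `defaultdict`, so the first access for a camera name lazily creates a fresh state dict with these defaults: no frame time yet, a black placeholder frame, and no plasma object id. A small illustration of that behaviour, assuming the same 720x1280 BGR frame size as above:

```python
from collections import defaultdict

import numpy as np

camera_data = defaultdict(lambda: {
    'best_objects': {},
    'tracked_objects': {},
    'current_frame_time': None,                            # no frame seen yet
    'current_frame': np.zeros((720, 1280, 3), np.uint8),   # black placeholder frame
    'object_id': None,                                     # no plasma object stored yet
})

# first access creates the default entry on the fly
print(camera_data['back_yard']['current_frame_time'])   # -> None
print(camera_data['back_yard']['current_frame'].shape)  # -> (720, 1280, 3)
```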
```diff
@@ -64,33 +66,40 @@ class TrackedObjectProcessor(threading.Thread):
             object_id_hash = hashlib.sha1(str.encode(f"{camera}{frame_time}"))
             object_id_bytes = object_id_hash.digest()
             object_id = plasma.ObjectID(object_id_bytes)
-            current_frame = self.plasma_client.get(object_id)
-
-            # draw the bounding boxes on the frame
-            for obj in tracked_objects.values():
-                thickness = 2
-                color = COLOR_MAP[obj['label']]
-
-                if obj['frame_time'] != frame_time:
-                    thickness = 1
-                    color = (255,0,0)
-
-                # draw the bounding boxes on the frame
-                box = obj['box']
-                draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
-                # draw the regions on the frame
-                region = obj['region']
-                cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-
-            if config['snapshots']['show_timestamp']:
-                time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-                cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-            ###
-            # Set the current frame as ready
-            ###
-            self.camera_data[camera]['current_frame'] = current_frame
-            self.camera_data[camera]['current_frame_time'] = frame_time
+            current_frame = self.plasma_client.get(object_id, timeout_ms=0)
+
+            if not current_frame is plasma.ObjectNotAvailable:
+                # draw the bounding boxes on the frame
+                for obj in tracked_objects.values():
+                    thickness = 2
+                    color = COLOR_MAP[obj['label']]
+
+                    if obj['frame_time'] != frame_time:
+                        thickness = 1
+                        color = (255,0,0)
+
+                    # draw the bounding boxes on the frame
+                    box = obj['box']
+                    draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+                    # draw the regions on the frame
+                    region = obj['region']
+                    cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
+
+                if config['snapshots']['show_timestamp']:
+                    time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+                    cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+
+                ###
+                # Set the current frame as ready
+                ###
+                self.camera_data[camera]['current_frame'] = current_frame
+                self.camera_data[camera]['current_frame_time'] = frame_time
+
+                # store the object id, so you can delete it at the next loop
+                previous_object_id = self.camera_data[camera]['object_id']
+                if not previous_object_id is None:
+                    self.plasma_client.delete([previous_object_id])
+                self.camera_data[camera]['object_id'] = object_id
 
             ###
             # Maintain the highest scoring recent object and frame for each label
```
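The key change in this hunk is fetching the frame with `timeout_ms=0` and checking for `plasma.ObjectNotAvailable`, so a frame that has not landed in the plasma store yet is skipped instead of stalling the loop, and the previous frame's object is deleted once a newer one is drawn. A rough sketch of that non-blocking get-and-cleanup pattern using pyarrow's (now legacy) plasma client, assuming a client obtained from `plasma.connect("/tmp/plasma")`:

```python
import hashlib

import pyarrow.plasma as plasma

def fetch_frame(plasma_client, camera, frame_time, previous_object_id):
    """Try to fetch a frame without blocking.

    Returns (frame, object_id) on success, or (None, previous_object_id)
    if the frame has not been written to the store yet.
    """
    # 20-byte sha1 digest doubles as the plasma ObjectID
    object_id = plasma.ObjectID(hashlib.sha1(str.encode(f"{camera}{frame_time}")).digest())

    # timeout_ms=0 returns immediately; a missing object yields plasma.ObjectNotAvailable
    current_frame = plasma_client.get(object_id, timeout_ms=0)
    if current_frame is plasma.ObjectNotAvailable:
        return None, previous_object_id

    # drop the previous frame from the store so objects don't accumulate
    if previous_object_id is not None:
        plasma_client.delete([previous_object_id])
    return current_frame, object_id
```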
```diff
@@ -104,10 +113,10 @@ class TrackedObjectProcessor(threading.Thread):
                     # if the object is a higher score than the current best score
                     # or the current object is more than 1 minute old, use the new object
                     if obj['score'] > best_objects[obj['label']]['score'] or (now - best_objects[obj['label']]['frame_time']) > 60:
-                        obj['frame'] = np.copy(current_frame)
+                        obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                         best_objects[obj['label']] = obj
                 else:
-                    obj['frame'] = np.copy(current_frame)
+                    obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                     best_objects[obj['label']] = obj
 
             ###
```
```diff
@@ -49,14 +49,6 @@ class ObjectTracker():
             obj['history'] = [entry]
 
     def match_and_update(self, frame_time, new_objects):
-        if len(new_objects) == 0:
-            for id in list(self.tracked_objects.keys()):
-                if self.disappeared[id] >= self.max_disappeared:
-                    self.deregister(id)
-                else:
-                    self.disappeared[id] += 1
-            return
-
         # group by name
         new_object_groups = defaultdict(lambda: [])
         for obj in new_objects:
```
```diff
@@ -69,6 +61,18 @@ class ObjectTracker():
                 'frame_time': frame_time
             })
 
+        # update any tracked objects with labels that are not
+        # seen in the current objects and deregister if needed
+        for obj in list(self.tracked_objects.values()):
+            if not obj['label'] in new_object_groups:
+                if self.disappeared[obj['id']] >= self.max_disappeared:
+                    self.deregister(obj['id'])
+                else:
+                    self.disappeared[obj['id']] += 1
+
+        if len(new_objects) == 0:
+            return
+
         # track objects for each label type
         for label, group in new_object_groups.items():
             current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
```
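Together, these two hunks move the "disappeared" bookkeeping so it runs per label: any tracked object whose label is absent from the current detections has its counter bumped, and it is deregistered once the counter passes `max_disappeared`. A stripped-down sketch of that pattern; the class below is illustrative, not frigate's `ObjectTracker`:

```python
from collections import defaultdict

class SimpleTracker:
    def __init__(self, max_disappeared=25):
        self.tracked_objects = {}            # id -> {'id': ..., 'label': ..., ...}
        self.disappeared = defaultdict(int)  # id -> consecutive frames without a match
        self.max_disappeared = max_disappeared

    def deregister(self, obj_id):
        del self.tracked_objects[obj_id]
        del self.disappeared[obj_id]

    def update_disappeared(self, labels_seen_this_frame):
        # bump the counter for every tracked object whose label was not seen,
        # and drop it once it has been missing for too many frames
        for obj in list(self.tracked_objects.values()):
            if obj['label'] not in labels_seen_this_frame:
                if self.disappeared[obj['id']] >= self.max_disappeared:
                    self.deregister(obj['id'])
                else:
                    self.disappeared[obj['id']] += 1
```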
```diff
@@ -20,7 +20,6 @@ from frigate.objects import ObjectTracker
 from frigate.edgetpu import RemoteObjectDetector
 from frigate.motion import MotionDetector
 
-# TODO: add back opencv fallback
 def get_frame_shape(source):
     ffprobe_cmd = " ".join([
         'ffprobe',
```
```diff
@@ -125,7 +124,11 @@ def track_camera(name, config, ffmpeg_global_config, global_objects_config, dete
     expected_fps = config['fps']
     take_frame = config.get('take_frame', 1)
 
-    frame_shape = get_frame_shape(ffmpeg_input)
+    if 'width' in config and 'height' in config:
+        frame_shape = (config['height'], config['width'], 3)
+    else:
+        frame_shape = get_frame_shape(ffmpeg_input)
+
     frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
 
     try:
```
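The final hunk lets the frame shape come from explicit `width`/`height` values in the camera config, probing the stream only when they are missing. A small sketch of that fallback, with a hypothetical `probe_frame_shape` standing in for the ffprobe helper:

```python
def resolve_frame_shape(config, source, probe_frame_shape):
    """Prefer configured dimensions; probe the stream only as a fallback."""
    if 'width' in config and 'height' in config:
        # (height, width, channels) to match numpy's array layout
        frame_shape = (config['height'], config['width'], 3)
    else:
        frame_shape = probe_frame_shape(source)

    # total bytes per raw BGR frame
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
    return frame_shape, frame_size
```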