forked from Github/frigate

Compare commits: v0.7.0-rc2 ... v0.7.0-rc3 (9 commits)

| SHA1 |
|---|
| 1771b9f286 |
| 56a9a46625 |
| 5174fc539f |
| 95cb97cf42 |
| 53adfe0b09 |
| 8d4e155ece |
| 6069709035 |
| c9d7fbbd12 |
| 0b51f58de0 |
```diff
@@ -215,6 +215,7 @@ cameras:
     snapshots:
       show_timestamp: True
       draw_zones: False
+      draw_bounding_boxes: True

     ################
     # Camera level object config. If defined, this is used instead of the global config.
```
```diff
@@ -164,7 +164,8 @@ def main():
     for name, config in CONFIG['cameras'].items():
         config['snapshots'] = {
             'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
-            'draw_zones': config.get('snapshots', {}).get('draw_zones', False)
+            'draw_zones': config.get('snapshots', {}).get('draw_zones', False),
+            'draw_bounding_boxes': config.get('snapshots', {}).get('draw_bounding_boxes', True)
         }
         config['zones'] = config.get('zones', {})
```
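For context, here is a minimal standalone sketch (not part of the diff) of how the new `draw_bounding_boxes` key resolves when a camera's config omits the `snapshots` block entirely, assuming the config is just a plain dict loaded from YAML. The camera names are hypothetical:

```python
# Hypothetical camera configs for illustration; key names mirror the diff above.
cameras = {
    'back': {},                                               # no snapshots block at all
    'front': {'snapshots': {'draw_bounding_boxes': False}},   # explicit override
}

for name, config in cameras.items():
    snapshots = config.get('snapshots', {})
    config['snapshots'] = {
        'show_timestamp': snapshots.get('show_timestamp', True),
        'draw_zones': snapshots.get('draw_zones', False),
        'draw_bounding_boxes': snapshots.get('draw_bounding_boxes', True),
    }

print(cameras['back']['snapshots']['draw_bounding_boxes'])   # True (default)
print(cameras['front']['snapshots']['draw_bounding_boxes'])  # False (from config)
```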
```diff
@@ -312,7 +313,7 @@ def main():
         shm.close()
         shm.unlink()

-    for detector in detectors:
+    for detector in detectors.values():
         detector.stop()
     for shm in camera_shms:
         shm.close()
```
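The `.values()` fix matters because iterating a dict yields its keys: the old loop would have called `.stop()` on the detector names (strings) rather than the detector objects. A small standalone sketch of the difference, with a hypothetical `Detector` class standing in for the real detector process:

```python
class Detector:
    """Stand-in for a detector process; only .stop() matters here."""
    def stop(self):
        print("stopped")

detectors = {'coral': Detector(), 'cpu': Detector()}

# Old form iterates keys, so `detector` is the string 'coral'/'cpu' and
# detector.stop() raises AttributeError: 'str' object has no attribute 'stop'.
# for detector in detectors:
#     detector.stop()

# Fixed form iterates the detector objects themselves.
for detector in detectors.values():
    detector.stop()
```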
```diff
@@ -388,9 +389,11 @@ def main():
     def best(camera_name, label):
         if camera_name in CONFIG['cameras']:
             best_object = object_processor.get_best(camera_name, label)
-            best_frame = best_object.get('frame', np.zeros((720,1280,3), np.uint8))
-            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
+            best_frame = best_object.get('frame')
+            if best_frame is None:
+                best_frame = np.zeros((720,1280,3), np.uint8)
+            else:
+                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

             crop = bool(request.args.get('crop', 0, type=int))
             if crop:
```
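The reshuffled `best()` logic keeps the blank placeholder out of `cv2.cvtColor`: `COLOR_YUV2BGR_I420` expects a single-channel YUV buffer with 1.5x the frame height, so converting the `(720, 1280, 3)` array of zeros would not work. A minimal sketch of the pattern, not frigate's code, assuming a 1280x720 camera:

```python
import cv2
import numpy as np

def to_bgr(best_object):
    """Return a BGR frame, falling back to a black placeholder when no frame exists."""
    best_frame = best_object.get('frame')
    if best_frame is None:
        # Placeholder is already BGR-shaped; no color conversion needed (or valid).
        return np.zeros((720, 1280, 3), np.uint8)
    # A real frame arrives as I420 YUV: single channel, height * 3/2 rows.
    return cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

# Missing frame -> placeholder; present frame -> converted.
print(to_bgr({}).shape)                                           # (720, 1280, 3)
print(to_bgr({'frame': np.zeros((1080, 1280), np.uint8)}).shape)  # (720, 1280, 3)
```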
```diff
@@ -181,10 +181,12 @@ class CameraState():
             # check each zone
             for name, zone in self.config['zones'].items():
                 contour = zone['contour']
-                # check if the object is in the zone and not filtered
-                if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0
-                    and not zone_filtered(obj, zone.get('filters', {}))):
-                    current_zones.append(name)
+                # check if the object is in the zone
+                if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
+                    # if the object passed the filters once, dont apply again
+                    if name in obj.get('zones', []) or not zone_filtered(obj, zone.get('filters', {})):
+                        current_zones.append(name)

            obj['zones'] = current_zones

            # maintain best objects
```
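The restructured zone check changes behavior in one way: once an object has been recorded in a zone, later frames skip the zone filters for it, so a momentary dip below a filter threshold no longer drops the zone. A toy sketch of that rule, with a hypothetical `zone_filtered` stand-in and the point-in-polygon test reduced to an `in_zone` flag:

```python
def zone_filtered(obj, filters):
    """Hypothetical stand-in: filter out boxes smaller than min_area."""
    return obj['area'] < filters.get('min_area', 0)

def zones_for(obj, zones, in_zone):
    """in_zone: whether the point test says the object is inside the zone."""
    current_zones = []
    for name, zone in zones.items():
        if in_zone:
            # once the object passed the filters, don't apply them again
            if name in obj.get('zones', []) or not zone_filtered(obj, zone.get('filters', {})):
                current_zones.append(name)
    return current_zones

zones = {'yard': {'filters': {'min_area': 1000}}}
obj = {'area': 1500, 'zones': []}
obj['zones'] = zones_for(obj, zones, in_zone=True)  # ['yard'] - passes the filter
obj['area'] = 500                                   # later frame, smaller box
print(zones_for(obj, zones, in_zone=True))          # ['yard'] - kept, filter skipped
```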
```diff
@@ -266,7 +268,14 @@ class TrackedObjectProcessor(threading.Thread):
         def snapshot(camera, obj):
             if not 'frame' in obj:
                 return

             best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
+            if self.camera_config[camera]['snapshots']['draw_bounding_boxes']:
+                thickness = 2
+                color = COLOR_MAP[obj['label']]
+                box = obj['box']
+                draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+
             mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
             if mqtt_config.get('crop_to_region'):
                 region = obj['region']
```
```diff
@@ -275,6 +284,16 @@ class TrackedObjectProcessor(threading.Thread):
                 height = int(mqtt_config['snapshot_height'])
                 width = int(height*best_frame.shape[1]/best_frame.shape[0])
                 best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

+            if self.camera_config[camera]['snapshots']['show_timestamp']:
+                time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
+                size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
+                text_width = size[0][0]
+                text_height = size[0][1]
+                desired_size = max(200, 0.33*best_frame.shape[1])
+                font_scale = desired_size/text_width
+                cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, color=(255, 255, 255), thickness=2)
+
             ret, jpg = cv2.imencode('.jpg', best_frame)
             if ret:
                 jpg_bytes = jpg.tobytes()
```
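The font-scale arithmetic in the timestamp block targets a rendered width of roughly a third of the frame, never less than 200 px: the string is measured once at `fontScale=1`, then scaled proportionally. A quick sketch of just that calculation, assuming OpenCV is available and a 1280-pixel-wide frame:

```python
import cv2

frame_width = 1280
time_to_show = "02/14/2021 09:30:00"

# getTextSize returns ((width, height), baseline) for the string at fontScale=1.
(text_width, text_height), _ = cv2.getTextSize(
    time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)

# Desired rendered width: at least 200 px, otherwise a third of the frame width.
desired_size = max(200, 0.33 * frame_width)

# fontScale that stretches the measured text to the desired width.
font_scale = desired_size / text_width
print(text_width, font_scale)
```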