forked from GitHub/frigate
Compare commits: v0.3.0-bet...v0.3.0

6 commits
| Author | SHA1 | Date |
|---|---|---|
|  | ab3e70b4db |  |
|  | d90e408d50 |  |
|  | 6c87ce0879 |  |
|  | b7b4e38f62 |  |
|  | 480175d70f |  |
|  | bee99ca6ff |  |
Dockerfile (17 changed lines)
```diff
@@ -53,14 +53,6 @@ RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     libva-drm2 libva2 i965-va-driver vainfo \
     && rm -rf /var/lib/apt/lists/*
 
-# Install core packages
-RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
-RUN pip install -U pip \
-    numpy \
-    Flask \
-    paho-mqtt \
-    PyYAML
-
 # Download & build OpenCV
 # TODO: use multistage build to reduce image size:
 # https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
```
```diff
@@ -101,6 +93,15 @@ RUN ln -s /coco_labels.txt /label_map.pbtext
 RUN (apt-get autoremove -y; \
     apt-get autoclean -y)
 
+# Install core packages
+RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
+RUN pip install -U pip \
+    numpy \
+    Flask \
+    paho-mqtt \
+    PyYAML \
+    matplotlib
+
 WORKDIR /opt/frigate/
 ADD frigate frigate/
 COPY detect_objects.py .
```
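The pip install layer moves to after the OpenCV build and gains matplotlib, which `frigate/util.py` below uses for per-label box colors. As a quick way to verify the layer, here is a minimal import check of my own (not repo code), meant to be run inside the built image:

```python
# check_imports.py: a hedged sanity-check sketch, not part of the repo.
import matplotlib
matplotlib.use('Agg')  # assumption: force a headless backend, the container has no display

import numpy
import flask
import paho.mqtt.client
import yaml
import matplotlib.pyplot as plt

# sampling one color from the tab10 colormap proves matplotlib is fully usable
print('ok:', plt.cm.get_cmap('tab10')(0))
```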
detect_objects.py

```diff
@@ -120,13 +120,11 @@ def main():
 
     def imagestream(camera_name):
         while True:
-            # max out at 5 FPS
-            time.sleep(0.2)
+            # max out at 1 FPS
+            time.sleep(1)
             frame = cameras[camera_name].get_current_frame_with_objects()
-            # encode the image into a jpg
-            ret, jpg = cv2.imencode('.jpg', frame)
             yield (b'--frame\r\n'
-               b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
+               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
 
     app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
```
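After this change, `imagestream` yields the camera's pre-encoded jpg bytes directly instead of re-encoding on every request. For context, a minimal sketch of how such a generator is typically wired into a Flask MJPEG endpoint; the route name and `cameras` dict here are stand-ins of mine, not necessarily the repo's exact code:

```python
import time
from flask import Flask, Response

app = Flask(__name__)
cameras = {}  # hypothetical stand-in: camera name -> Camera object

def imagestream(camera_name):
    while True:
        # max out at 1 FPS; frames are already jpg-encoded by the Camera
        time.sleep(1)
        frame = cameras[camera_name].get_current_frame_with_objects()
        # multipart MJPEG framing: each new part replaces the previous image
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/<camera_name>')
def mjpeg_feed(camera_name):
    # the boundary declared here must match the b'--frame' marker above
    return Response(imagestream(camera_name),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=False)  # port is a WEB_PORT stand-in
```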
frigate/mqtt.py

```diff
@@ -33,8 +33,8 @@ class MqttObjectPublisher(threading.Thread):
             if new_status != current_object_status[obj_name]:
                 current_object_status[obj_name] = new_status
                 self.client.publish(self.topic_prefix+'/'+obj_name, new_status, retain=False)
-                # send the snapshot over mqtt as well
-                if not self.best_frames.best_frames[obj_name] is None:
+                # send the snapshot over mqtt if we have it as well
+                if obj_name in self.best_frames.best_frames:
                     ret, jpg = cv2.imencode('.jpg', self.best_frames.best_frames[obj_name])
                     if ret:
                         jpg_bytes = jpg.tobytes()
@@ -43,4 +43,5 @@ class MqttObjectPublisher(threading.Thread):
         # expire any objects that are ON and no longer detected
         expired_objects = [obj_name for obj_name, status in current_object_status.items() if status == 'ON' and not obj_name in obj_counter]
         for obj_name in expired_objects:
             current_object_status[obj_name] = 'OFF'
+            self.client.publish(self.topic_prefix+'/'+obj_name, 'OFF', retain=False)
```
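The membership test replaces direct indexing, so an object type with no captured snapshot yet no longer raises a `KeyError`; expired objects now also get an explicit `OFF` publish. A standalone sketch of this publish path, assuming a broker on localhost and a hypothetical `/snapshot` subtopic:

```python
import cv2
import numpy as np
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect('localhost', 1883)  # assumption: broker on localhost
client.loop_start()                # network loop so queued publishes are flushed

best_frames = {}  # obj_name -> numpy image, as maintained by BestFrames

def publish_object(topic_prefix, obj_name, new_status):
    client.publish(topic_prefix + '/' + obj_name, new_status, retain=False)
    # 'in' check avoids a KeyError when no snapshot exists yet for this type
    if obj_name in best_frames:
        ret, jpg = cv2.imencode('.jpg', best_frames[obj_name])
        if ret:
            # raw jpg bytes are a valid MQTT payload
            client.publish(topic_prefix + '/' + obj_name + '/snapshot',
                           jpg.tobytes(), retain=False)

best_frames['person'] = np.zeros((720, 1280, 3), np.uint8)
publish_object('frigate', 'person', 'ON')
```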
frigate/object_detection.py

```diff
@@ -4,22 +4,7 @@ import cv2
 import threading
 import numpy as np
 from edgetpu.detection.engine import DetectionEngine
-from . util import tonumpyarray
-
-# Path to frozen detection graph. This is the actual model that is used for the object detection.
-PATH_TO_CKPT = '/frozen_inference_graph.pb'
-# List of the strings that is used to add correct label for each box.
-PATH_TO_LABELS = '/label_map.pbtext'
-
-# Function to read labels from text files.
-def ReadLabelFile(file_path):
-    with open(file_path, 'r') as f:
-        lines = f.readlines()
-    ret = {}
-    for line in lines:
-        pair = line.strip().split(maxsplit=1)
-        ret[int(pair[0])] = pair[1].strip()
-    return ret
+from . util import tonumpyarray, LABELS, PATH_TO_CKPT
 
 class PreppedQueueProcessor(threading.Thread):
     def __init__(self, cameras, prepped_frame_queue):
@@ -30,7 +15,7 @@ class PreppedQueueProcessor(threading.Thread):
 
         # Load the edgetpu engine and labels
        self.engine = DetectionEngine(PATH_TO_CKPT)
-        self.labels = ReadLabelFile(PATH_TO_LABELS)
+        self.labels = LABELS
 
     def run(self):
         # process queue...
```
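`ReadLabelFile` and the model/label path constants move into `frigate/util.py` (see its diff below), so the detector and the drawing code share one `LABELS` dict. A small demo of the parse format it handles, using hypothetical COCO-style lines:

```python
# each line is "<id> <label>"; splitting once keeps multi-word labels intact
sample = "0  person\n1  bicycle\n12 parking meter\n"

labels = {}
for line in sample.splitlines():
    pair = line.strip().split(maxsplit=1)   # "12 parking meter" -> ["12", "parking meter"]
    labels[int(pair[0])] = pair[1].strip()

print(labels)  # {0: 'person', 1: 'bicycle', 12: 'parking meter'}
```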
frigate/objects.py

```diff
@@ -73,9 +73,8 @@ class BestFrames(threading.Thread):
                 if obj['frame_time'] in recent_frames:
                     best_frame = recent_frames[obj['frame_time']] #, np.zeros((720,1280,3), np.uint8))
 
-                    label = "{}: {}% {}".format(name,int(obj['score']*100),int(obj['area']))
                     draw_box_with_label(best_frame, obj['xmin'], obj['ymin'],
-                        obj['xmax'], obj['ymax'], label)
+                        obj['xmax'], obj['ymax'], obj['name'], obj['score'], obj['area'])
 
                     # print a timestamp
                     time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
```
frigate/util.py

```diff
@@ -1,19 +1,31 @@
 import numpy as np
 import cv2
+import matplotlib.pyplot as plt
+
+# Function to read labels from text files.
+def ReadLabelFile(file_path):
+    with open(file_path, 'r') as f:
+        lines = f.readlines()
+    ret = {}
+    for line in lines:
+        pair = line.strip().split(maxsplit=1)
+        ret[int(pair[0])] = pair[1].strip()
+    return ret
 
 # convert shared memory array into numpy array
 def tonumpyarray(mp_arr):
     return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
 
-def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
-    color = (255,0,0)
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, score, area):
+    color = COLOR_MAP[label]
+    display_text = "{}: {}% {}".format(label,int(score*100),int(area))
     cv2.rectangle(frame, (x_min, y_min),
         (x_max, y_max),
         color, 2)
     font_scale = 0.5
     font = cv2.FONT_HERSHEY_SIMPLEX
     # get the width and height of the text box
-    size = cv2.getTextSize(label, font, fontScale=font_scale, thickness=2)
+    size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
     text_width = size[0][0]
     text_height = size[0][1]
     line_height = text_height + size[1]
@@ -23,4 +35,16 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
     # make the coords of the box with a small padding of two pixels
     textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
     cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
-    cv2.putText(frame, label, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
+    cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
+
+# Path to frozen detection graph. This is the actual model that is used for the object detection.
+PATH_TO_CKPT = '/frozen_inference_graph.pb'
+# List of the strings that is used to add correct label for each box.
+PATH_TO_LABELS = '/label_map.pbtext'
+
+LABELS = ReadLabelFile(PATH_TO_LABELS)
+cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
+
+COLOR_MAP = {}
+for key, val in LABELS.items():
+    COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
```
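The new module-level block samples matplotlib's qualitative `tab10` colormap so every label gets a stable, visually distinct box color instead of the old hard-coded blue. A standalone sketch of the same construction, with hypothetical labels standing in for the real `/label_map.pbtext` contents:

```python
import matplotlib.pyplot as plt

# hypothetical labels, standing in for ReadLabelFile('/label_map.pbtext')
LABELS = {0: 'person', 1: 'bicycle', 2: 'car'}

# 'tab10' is a qualitative colormap; sampling it with len(LABELS) entries
# gives one fixed color per class id
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))

COLOR_MAP = {}
for key, val in LABELS.items():
    # cmap(key) returns an RGBA tuple of floats in [0, 1];
    # keep the RGB channels and scale them to 0-255 integers
    COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])

print(COLOR_MAP['person'])  # (31, 119, 180), the first tab10 color
```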
frigate/video.py

```diff
@@ -145,6 +145,12 @@ class Camera:
         # Condition for notifying that objects were parsed
         self.objects_parsed = mp.Condition()
 
+        # initialize the frame cache
+        self.cached_frame_with_objects = {
+            'frame_bytes': [],
+            'frame_time': 0
+        }
+
         self.ffmpeg_process = None
         self.capture_thread = None
@@ -316,10 +322,12 @@ class Camera:
         frame = self.current_frame.copy()
         frame_time = self.frame_time.value
 
+        if frame_time == self.cached_frame_with_objects['frame_time']:
+            return self.cached_frame_with_objects['frame_bytes']
+
         # draw the bounding boxes on the screen
         for obj in detected_objects:
-            label = "{}: {}% {}".format(obj['name'],int(obj['score']*100),int(obj['area']))
-            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)
+            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], obj['name'], obj['score'], obj['area'])
 
         for region in self.regions:
             color = (255,255,255)
@@ -334,7 +342,17 @@ class Camera:
         # convert to BGR
         frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
 
-        return frame
+        # encode the image into a jpg
+        ret, jpg = cv2.imencode('.jpg', frame)
+
+        frame_bytes = jpg.tobytes()
+
+        self.cached_frame_with_objects = {
+            'frame_bytes': frame_bytes,
+            'frame_time': frame_time
+        }
+
+        return frame_bytes
```
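`get_current_frame_with_objects` now memoizes on the frame timestamp: repeated calls between frame updates return the cached jpg bytes instead of re-drawing boxes and re-encoding. The pattern in isolation, with the Camera internals stubbed out (a sketch of mine, not the class itself):

```python
import cv2
import numpy as np

class FrameCache:
    def __init__(self):
        self.cached = {'frame_bytes': b'', 'frame_time': 0}

    def get_jpg(self, frame, frame_time):
        # cache hit: an unchanged timestamp means the rendered frame is identical
        if frame_time == self.cached['frame_time']:
            return self.cached['frame_bytes']
        # cache miss: encode once and remember the result
        ret, jpg = cv2.imencode('.jpg', frame)
        self.cached = {'frame_bytes': jpg.tobytes(), 'frame_time': frame_time}
        return self.cached['frame_bytes']

cache = FrameCache()
frame = np.zeros((720, 1280, 3), np.uint8)
b1 = cache.get_jpg(frame, 1.0)
b2 = cache.get_jpg(frame, 1.0)   # served from cache, no second encode
assert b1 is b2
```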