Compare commits

...

47 Commits

Author SHA1 Message Date
Blake Blackshear
cd057370e1 fallback to opencv to detect resolution and allow config to specify 2020-02-22 09:03:00 -06:00
Blake Blackshear
6263912655 use ffprobe to get frame shape (fixes #87) 2020-02-22 09:03:00 -06:00
Blake Blackshear
af247275cf make timestamp on snapshots configurable (fixes #88) 2020-02-22 09:03:00 -06:00
Blake Blackshear
1198c29dac make watchdog timeout configurable per camera (fixes #95) 2020-02-22 09:03:00 -06:00
Blake Blackshear
169603d3ff attempt to fix regions in process key error 2020-02-22 09:03:00 -06:00
Blake Blackshear
dc7eecebc6 clarify config 2020-02-22 09:03:00 -06:00
Blake Blackshear
0dd4087d5d switch base image back to ubuntu:18.04 2020-02-22 09:03:00 -06:00
Blake Blackshear
6ecf87fc60 update config example 2020-02-22 09:03:00 -06:00
Blake Blackshear
ebcf1482f8 remove region in process when skipping 2020-02-22 09:03:00 -06:00
Blake Blackshear
50bcf60893 switch to opencv headless 2020-02-22 09:03:00 -06:00
Blake Blackshear
38efbd63ea add camera name to ffmpeg log messages 2020-02-22 09:03:00 -06:00
Blake Blackshear
50bcad8b77 skip regions when the queue is too full and add more locks 2020-02-22 09:03:00 -06:00
Blake Blackshear
cfffb219ae switch back to stretch for hwaccel issues 2020-02-22 09:03:00 -06:00
Blake Blackshear
382d7be50a check correct object 2020-02-22 09:03:00 -06:00
Blake Blackshear
f43dc36a37 cleanup 2020-02-22 09:03:00 -06:00
Blake Blackshear
38e7fa07d2 add a label position arg for bounding boxes 2020-02-22 09:03:00 -06:00
Blake Blackshear
e261c20819 let the queues get as big as needed 2020-02-22 09:03:00 -06:00
Blake Blackshear
3a66e672d3 notify mqtt when objects deregistered 2020-02-22 09:03:00 -06:00
Blake Blackshear
2aada930e3 fix multiple object type tracking 2020-02-22 09:03:00 -06:00
Blake Blackshear
d87f4407a0 switch everything to run off of tracked objects 2020-02-22 09:03:00 -06:00
Blake Blackshear
be5a114f6a group by label before tracking objects 2020-02-22 09:03:00 -06:00
Blake Blackshear
32b212c7b6 fix mask filtering 2020-02-22 09:03:00 -06:00
Blake Blackshear
76c8e3a12f make a copy 2020-02-22 09:03:00 -06:00
Blake Blackshear
16f7a361c3 fix object filters 2020-02-22 09:03:00 -06:00
Blake Blackshear
634b87307f group by label before suppressing boxes 2020-02-22 09:03:00 -06:00
Blake Blackshear
1d4fbbdba3 update all obj props 2020-02-22 09:03:00 -06:00
Blake Blackshear
65579e9cbf add thread to write frames to disk 2020-02-22 09:03:00 -06:00
Blake Blackshear
49dc029c43 merge boxes by label 2020-02-22 09:03:00 -06:00
Blake Blackshear
08174d8db2 fix color of best image 2020-02-22 09:03:00 -06:00
Blake Blackshear
5199242a68 remove unused current frame variable 2020-02-22 09:03:00 -06:00
Blake Blackshear
725dd3220c removing pillow-simd for now 2020-02-22 09:03:00 -06:00
Blake Blackshear
10dc56f6ea revamp dockerfile 2020-02-22 09:03:00 -06:00
Blake Blackshear
cc2abe93a6 track objects and add config for tracked objects 2020-02-22 09:03:00 -06:00
Blake Blackshear
0c6717090c implement filtering and switch to NMS with OpenCV 2020-02-22 09:03:00 -06:00
Blake Blackshear
f5a2252b29 cleanup imports 2020-02-22 09:03:00 -06:00
Blake Blackshear
02efb6f415 fixing a few things 2020-02-22 09:03:00 -06:00
Blake Blackshear
5b4c6e50bc dedupe detected objects 2020-02-22 09:03:00 -06:00
Blake Blackshear
9cc46a71cb working dynamic regions, but messy 2020-02-22 09:03:00 -06:00
Blake Blackshear
be1673b00a process detected objects in a queue 2020-02-22 09:03:00 -06:00
Blake Blackshear
b6130e77ff label threads and implements stats endpoint 2020-02-22 09:03:00 -06:00
Blake Blackshear
4180c710cd refactor resizing into generic priority queues 2020-02-22 09:03:00 -06:00
Blake Blackshear
ab3e70b4db check to see if we have a frame before trying to send 2020-01-02 07:39:57 -06:00
Blake Blackshear
d90e408d50 set the current object status to off when expired 2020-01-02 07:39:57 -06:00
Blake Blackshear
6c87ce0879 cache the computed jpg bytes to reduce cpu usage 2020-01-02 07:39:57 -06:00
Blake Blackshear
b7b4e38f62 slow down the preview feed to lower cpu usage 2020-01-02 07:39:57 -06:00
Blake Blackshear
480175d70f add color map to use different colors for different objects 2020-01-02 07:39:57 -06:00
Blake Blackshear
bee99ca6ff track and report all detected object types 2020-01-02 07:39:57 -06:00
12 changed files with 1014 additions and 486 deletions

View File: Dockerfile

@@ -1,106 +1,50 @@
FROM ubuntu:18.04
LABEL maintainer "blakeb@blakeshome.com"
ARG DEVICE
ENV DEBIAN_FRONTEND=noninteractive
# Install packages for apt repo
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
apt-transport-https \
ca-certificates \
curl \
wget \
gnupg-agent \
dirmngr \
software-properties-common \
&& rm -rf /var/lib/apt/lists/*
RUN apt -qq update && apt -qq install --no-install-recommends -y \
apt-transport-https ca-certificates \
gnupg wget \
ffmpeg \
python3 \
python3-pip \
python3-dev \
python3-numpy \
# python-prctl
build-essential libcap-dev \
# pillow-simd
# zlib1g-dev libjpeg-dev \
# VAAPI drivers for Intel hardware accel
libva-drm2 libva2 i965-va-driver vainfo \
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
&& wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& apt -qq update \
&& echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections \
&& apt -qq install --no-install-recommends -y \
libedgetpu1-max \
python3-edgetpu \
&& rm -rf /var/lib/apt/lists/* \
&& (apt-get autoremove -y; apt-get autoclean -y)
COPY scripts/install_odroid_repo.sh .
# needs to be installed before others
RUN pip3 install -U wheel setuptools
RUN if [ "$DEVICE" = "odroid" ]; then \
sh /install_odroid_repo.sh; \
fi
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
python3 \
# OpenCV dependencies
ffmpeg \
build-essential \
cmake \
unzip \
pkg-config \
libjpeg-dev \
libpng-dev \
libtiff-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libv4l-dev \
libxvidcore-dev \
libx264-dev \
libgtk-3-dev \
libatlas-base-dev \
gfortran \
python3-dev \
# Coral USB Python API Dependencies
libusb-1.0-0 \
python3-pip \
python3-pil \
python3-numpy \
libc++1 \
libc++abi1 \
libunwind8 \
libgcc1 \
# VAAPI drivers for Intel hardware accel
libva-drm2 libva2 i965-va-driver vainfo \
&& rm -rf /var/lib/apt/lists/*
# Install core packages
RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
RUN pip install -U pip \
numpy \
Flask \
paho-mqtt \
PyYAML
# Download & build OpenCV
# TODO: use multistage build to reduce image size:
# https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
# https://www.merixstudio.com/blog/docker-multi-stage-builds-python-development/
RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
RUN cd /usr/local/src/ \
&& unzip 4.0.1.zip \
&& rm 4.0.1.zip \
&& cd /usr/local/src/opencv-4.0.1/ \
&& mkdir build \
&& cd /usr/local/src/opencv-4.0.1/build \
&& cmake -D CMAKE_INSTALL_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local/ .. \
&& make -j4 \
&& make install \
&& ldconfig \
&& rm -rf /usr/local/src/opencv-4.0.1
# Download and install EdgeTPU libraries for Coral
RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names \
&& tar xzf edgetpu_api.tar.gz
COPY scripts/install_edgetpu_api.sh edgetpu_api/install.sh
RUN cd edgetpu_api \
&& /bin/bash install.sh
# symlink a python 3.6 version of the EdgeTPU wrapper
RUN cd /usr/local/lib/python3.6/dist-packages/edgetpu/swig/ \
&& ln -s _edgetpu_cpp_wrapper.cpython-35m-arm-linux-gnueabihf.so _edgetpu_cpp_wrapper.cpython-36m-arm-linux-gnueabihf.so
RUN pip3 install -U \
opencv-python-headless \
python-prctl \
Flask \
paho-mqtt \
PyYAML \
matplotlib \
scipy
# symlink the model and labels
RUN wget https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
RUN wget https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
RUN wget -q https://github.com/google-coral/edgetpu/raw/master/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
RUN wget -q https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
RUN ln -s mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
RUN ln -s /coco_labels.txt /label_map.pbtext
# Minimize image size
RUN (apt-get autoremove -y; \
apt-get autoclean -y)
WORKDIR /opt/frigate/
ADD frigate frigate/
COPY detect_objects.py .

View File: README.md

@@ -55,20 +55,22 @@ Example docker-compose:
A `config.yml` file must exist in the `config` directory. See the example [here](config/config.example.yml); device-specific info can be found [here](docs/DEVICES.md).
Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best person snapshot at `http://localhost:5000/<camera_name>/best_person.jpg`
Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
## Integration with HomeAssistant
```
camera:
- name: Camera Last Person
platform: mqtt
topic: frigate/<camera_name>/snapshot
topic: frigate/<camera_name>/person/snapshot
- name: Camera Last Car
platform: mqtt
topic: frigate/<camera_name>/car/snapshot
binary_sensor:
- name: Camera Person
platform: mqtt
state_topic: "frigate/<camera_name>/objects"
value_template: '{{ value_json.person }}'
state_topic: "frigate/<camera_name>/person"
device_class: motion
availability_topic: "frigate/available"
@@ -89,7 +91,7 @@ automation:
message: "A person was detected."
data:
photo:
- url: http://<ip>:5000/<camera_name>/best_person.jpg
- url: http://<ip>:5000/<camera_name>/person/best.jpg
caption: A person was detected.
```
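Outside of HomeAssistant, the same topics can be consumed directly. Below is a minimal sketch using `paho-mqtt`, assuming a broker on `localhost` and the `back` camera from the example config:
```python
# Hedged sketch, not part of the repo: subscribe to the per-object topics.
# Broker host and camera name are assumptions.
import paho.mqtt.client as mqtt

CAMERA = "back"  # camera name from the example config

def on_connect(client, userdata, flags, rc):
    client.subscribe("frigate/{}/person".format(CAMERA))           # 'ON'/'OFF'
    client.subscribe("frigate/{}/person/snapshot".format(CAMERA))  # jpg bytes

def on_message(client, userdata, msg):
    if msg.topic.endswith("/snapshot"):
        with open("last_person.jpg", "wb") as f:
            f.write(msg.payload)  # retained best snapshot as raw jpg bytes
    else:
        print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)  # assumed broker location
client.loop_forever()
```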

View File: benchmark.py

@@ -14,7 +14,7 @@ flattened_frame = np.expand_dims(frame, axis=0).flatten()
detection_times = []
for x in range(0, 1000):
objects = engine.DetectWithInputTensor(flattened_frame, threshold=0.1, top_k=3)
objects = engine.detect_with_input_tensor(flattened_frame, threshold=0.1, top_k=3)
detection_times.append(engine.get_inference_time())
print("Average inference time: " + str(statistics.mean(detection_times)))

View File: config/config.example.yml

@@ -45,7 +45,27 @@ mqtt:
# - rawvideo
# - -pix_fmt
# - rgb24
####################
# Global object configuration. Applies to all cameras
# unless overridden at the camera levels.
# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
# All labels from the model are reported over MQTT. These values are used to filter out false positives.
# min_area (optional): minimum width*height of the bounding box for the detected object
# max_area (optional): maximum width*height of the bounding box for the detected object
# threshold (optional): The minimum decimal percentage (50% hit = 0.5) for the confidence from tensorflow
####################
objects:
track:
- person
- car
- truck
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.5
cameras:
back:
ffmpeg:
@@ -61,6 +81,12 @@ cameras:
# hwaccel_args: []
# input_args: []
# output_args: []
################
## Optionally specify the resolution of the video feed. Frigate will try to auto-detect it if not specified
################
# height: 720
# width: 1280
################
## Optional mask. Must be the same dimensions as your video feed.
@@ -78,13 +104,34 @@ cameras:
# 3 every 3rd frame, etc.
################
take_frame: 1
################
# The number of seconds frigate will allow a camera to go without sending a frame before
# assuming the ffmpeg process has a problem and restarting.
################
# watchdog_timeout: 300
################
# Configuration for the snapshot sent over mqtt
################
snapshots:
show_timestamp: True
################
# Camera level object config. This config is merged with the global config above.
################
objects:
track:
- person
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.5
################
# size: size of the region in pixels
# x_offset/y_offset: position of the upper left corner of your region (top left of image is 0,0)
# min_person_area (optional): minimum width*height of the bounding box for the detected person
# max_person_area (optional): maximum width*height of the bounding box for the detected person
# threshold (optional): The minimum decimal percentage (50% hit = 0.5) for the confidence from tensorflow
# Tips: All regions are resized to 300x300 before detection because the model is trained on that size.
# Resizing regions takes CPU power. Ideally, all regions should be as close to 300x300 as possible.
# Defining a region that goes outside the bounds of the image will result in errors.
@@ -93,18 +140,9 @@ cameras:
- size: 350
x_offset: 0
y_offset: 300
min_person_area: 5000
max_person_area: 100000
threshold: 0.5
- size: 400
x_offset: 350
y_offset: 250
min_person_area: 2000
max_person_area: 100000
threshold: 0.5
- size: 400
x_offset: 750
y_offset: 250
min_person_area: 2000
max_person_area: 100000
threshold: 0.5
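As a hedged illustration of how these filter values take effect, the sketch below mirrors the `filtered()` check that appears later in this diff; the object dict and frame size are invented:
```python
# Hedged sketch: how min_area/max_area/threshold filter one detection.
# The filters mirror the example config above; the object dict is invented.
filters = {'person': {'min_area': 5000, 'max_area': 100000, 'threshold': 0.5}}
obj = {'name': 'person', 'area': 4200, 'score': 0.62}

def filtered(obj, filters, frame_area=1280*720):
    obj_settings = filters.get(obj['name'], {})
    if obj_settings.get('min_area', -1) > obj['area']:
        return True   # bounding box smaller than min_area
    if obj_settings.get('max_area', frame_area) < obj['area']:
        return True   # bounding box larger than max_area
    if obj_settings.get('threshold', 0) > obj['score']:
        return True   # confidence below threshold
    return False

print(filtered(obj, filters))  # True: area 4200 is below min_area 5000
```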

View File: detect_objects.py

@@ -3,11 +3,12 @@ import time
import queue
import yaml
import numpy as np
from flask import Flask, Response, make_response
from flask import Flask, Response, make_response, jsonify
import paho.mqtt.client as mqtt
from frigate.video import Camera
from frigate.object_detection import PreppedQueueProcessor
from frigate.util import EventsPerSecond
with open('/config/config.yml') as f:
CONFIG = yaml.safe_load(f)
@@ -42,6 +43,8 @@ FFMPEG_DEFAULT_CONFIG = {
'-pix_fmt', 'rgb24'])
}
GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
WEB_PORT = CONFIG.get('web_port', 5000)
DEBUG = (CONFIG.get('debug', '0') == '1')
@@ -68,19 +71,23 @@ def main():
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start()
# Queue for prepped frames, max size set to (number of cameras * 5)
max_queue_size = len(CONFIG['cameras'].items())*5
prepped_frame_queue = queue.Queue(max_queue_size)
# Queue for prepped frames; allowed to grow as needed
prepped_frame_queue = queue.Queue()
cameras = {}
for name, config in CONFIG['cameras'].items():
cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG, config,
prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
fps_tracker = EventsPerSecond()
prepped_queue_processor = PreppedQueueProcessor(
cameras,
prepped_frame_queue
prepped_frame_queue,
fps_tracker
)
prepped_queue_processor.start()
fps_tracker.start()
for name, camera in cameras.items():
camera.start()
@@ -94,18 +101,34 @@ def main():
# return a health check
return "Frigate is running. Alive and healthy!"
@app.route('/<camera_name>/best_person.jpg')
def best_person(camera_name):
@app.route('/debug/stats')
def stats():
stats = {
'coral': {
'fps': fps_tracker.eps(),
'inference_speed': prepped_queue_processor.avg_inference_speed,
'queue_length': prepped_frame_queue.qsize()
}
}
for name, camera in cameras.items():
stats[name] = camera.stats()
return jsonify(stats)
@app.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
if camera_name in cameras:
best_person_frame = cameras[camera_name].get_best_person()
if best_person_frame is None:
best_person_frame = np.zeros((720,1280,3), np.uint8)
ret, jpg = cv2.imencode('.jpg', best_person_frame)
best_frame = cameras[camera_name].get_best(label)
if best_frame is None:
best_frame = np.zeros((720,1280,3), np.uint8)
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return f'Camera named {camera_name} not found', 404
return "Camera named {} not found".format(camera_name), 404
@app.route('/<camera_name>')
def mjpeg_feed(camera_name):
@@ -114,17 +137,15 @@ def main():
return Response(imagestream(camera_name),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return f'Camera named {camera_name} not found', 404
return "Camera named {} not found".format(camera_name), 404
def imagestream(camera_name):
while True:
# max out at 5 FPS
time.sleep(0.2)
# max out at 1 FPS
time.sleep(1)
frame = cameras[camera_name].get_current_frame_with_objects()
# encode the image into a jpg
ret, jpg = cv2.imencode('.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
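A quick sketch of polling the new `/debug/stats` endpoint; the host and port assume the default `web_port` of 5000:
```python
# Hedged sketch: read the stats endpoint added in this diff.
import json
import urllib.request

with urllib.request.urlopen('http://localhost:5000/debug/stats') as resp:
    stats = json.load(resp)

# global coral stats plus one entry per camera
print(stats['coral']['fps'], stats['coral']['inference_speed'],
      stats['coral']['queue_length'])
```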

View File: frigate/mqtt.py

@@ -1,41 +1,54 @@
import json
import cv2
import threading
import prctl
from collections import Counter, defaultdict
import itertools
class MqttObjectPublisher(threading.Thread):
def __init__(self, client, topic_prefix, objects_parsed, detected_objects, best_person_frame):
def __init__(self, client, topic_prefix, camera):
threading.Thread.__init__(self)
self.client = client
self.topic_prefix = topic_prefix
self.objects_parsed = objects_parsed
self._detected_objects = detected_objects
self.best_person_frame = best_person_frame
self.camera = camera
def run(self):
last_sent_payload = ""
prctl.set_name(self.__class__.__name__)
current_object_status = defaultdict(lambda: 'OFF')
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# initialize the payload
payload = {}
# count objects with more than 1 entry in history by type
obj_counter = Counter()
for obj in self.camera.object_tracker.tracked_objects.values():
if len(obj['history']) > 1:
obj_counter[obj['name']] += 1
# report on detected objects
for obj_name, count in obj_counter.items():
new_status = 'ON' if count > 0 else 'OFF'
if new_status != current_object_status[obj_name]:
current_object_status[obj_name] = new_status
self.client.publish(self.topic_prefix+'/'+obj_name, new_status, retain=False)
# send the snapshot over mqtt if we have it as well
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)
# wait until objects have been parsed
with self.objects_parsed:
self.objects_parsed.wait()
# add all the person scores in detected objects
detected_objects = self._detected_objects.copy()
person_score = sum([obj['score'] for obj in detected_objects if obj['name'] == 'person'])
# if the person score is more than 100, set person to ON
payload['person'] = 'ON' if int(person_score*100) > 100 else 'OFF'
# send message for objects if different
new_payload = json.dumps(payload, sort_keys=True)
if new_payload != last_sent_payload:
last_sent_payload = new_payload
self.client.publish(self.topic_prefix+'/objects', new_payload, retain=False)
# send the snapshot over mqtt as well
if not self.best_person_frame.best_frame is None:
ret, jpg = cv2.imencode('.jpg', self.best_person_frame.best_frame)
# expire any objects that are ON and no longer detected
expired_objects = [obj_name for obj_name, status in current_object_status.items() if status == 'ON' and not obj_name in obj_counter]
for obj_name in expired_objects:
current_object_status[obj_name] = 'OFF'
self.client.publish(self.topic_prefix+'/'+obj_name, 'OFF', retain=False)
# send an updated snapshot over mqtt as well if we have it
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/snapshot', jpg_bytes, retain=True)
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)
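A condensed, runnable sketch of the ON/OFF transition and expiry logic above, with `publish()` replaced by `print()` and invented object counts:
```python
# Hedged sketch of the ON/OFF transitions and expiry in MqttObjectPublisher.
from collections import Counter, defaultdict

current_object_status = defaultdict(lambda: 'OFF')

def report(obj_counter):
    for obj_name, count in obj_counter.items():
        new_status = 'ON' if count > 0 else 'OFF'
        if new_status != current_object_status[obj_name]:
            current_object_status[obj_name] = new_status
            print('frigate/back/' + obj_name, new_status)
    # expire labels that are ON but no longer counted
    expired = [name for name, status in current_object_status.items()
               if status == 'ON' and name not in obj_counter]
    for obj_name in expired:
        current_object_status[obj_name] = 'OFF'
        print('frigate/back/' + obj_name, 'OFF')

report(Counter({'person': 1}))  # person -> ON
report(Counter({'car': 2}))     # car -> ON, person expires -> OFF
```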

View File: frigate/object_detection.py

@@ -2,27 +2,15 @@ import datetime
import time
import cv2
import threading
import copy
import prctl
import numpy as np
from edgetpu.detection.engine import DetectionEngine
from . util import tonumpyarray
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/label_map.pbtext'
# Function to read labels from text files.
def ReadLabelFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT, calculate_region
class PreppedQueueProcessor(threading.Thread):
def __init__(self, cameras, prepped_frame_queue):
def __init__(self, cameras, prepped_frame_queue, fps):
threading.Thread.__init__(self)
self.cameras = cameras
@@ -30,83 +18,122 @@ class PreppedQueueProcessor(threading.Thread):
# Load the edgetpu engine and labels
self.engine = DetectionEngine(PATH_TO_CKPT)
self.labels = ReadLabelFile(PATH_TO_LABELS)
self.labels = LABELS
self.fps = fps
self.avg_inference_speed = 10
def run(self):
prctl.set_name(self.__class__.__name__)
# process queue...
while True:
frame = self.prepped_frame_queue.get()
# Actual detection.
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=frame['region_threshold'], top_k=3)
# print(self.engine.get_inference_time())
frame['detected_objects'] = self.engine.detect_with_input_tensor(frame['frame'], threshold=0.2, top_k=5)
self.fps.update()
self.avg_inference_speed = (self.avg_inference_speed*9 + self.engine.get_inference_time())/10
# parse and pass detected objects back to the camera
parsed_objects = []
for obj in objects:
box = obj.bounding_box.flatten().tolist()
parsed_objects.append({
'frame_time': frame['frame_time'],
'name': str(self.labels[obj.label_id]),
'score': float(obj.score),
'xmin': int((box[0] * frame['region_size']) + frame['region_x_offset']),
'ymin': int((box[1] * frame['region_size']) + frame['region_y_offset']),
'xmax': int((box[2] * frame['region_size']) + frame['region_x_offset']),
'ymax': int((box[3] * frame['region_size']) + frame['region_y_offset'])
})
self.cameras[frame['camera_name']].add_objects(parsed_objects)
# should this be a region class?
class FramePrepper(threading.Thread):
def __init__(self, camera_name, shared_frame, frame_time, frame_ready,
frame_lock,
region_size, region_x_offset, region_y_offset, region_threshold,
prepped_frame_queue):
self.cameras[frame['camera_name']].detected_objects_queue.put(frame)
class RegionRequester(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera_name = camera_name
self.shared_frame = shared_frame
self.frame_time = frame_time
self.frame_ready = frame_ready
self.frame_lock = frame_lock
self.region_size = region_size
self.region_x_offset = region_x_offset
self.region_y_offset = region_y_offset
self.region_threshold = region_threshold
self.prepped_frame_queue = prepped_frame_queue
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
frame_time = 0.0
while True:
now = datetime.datetime.now().timestamp()
with self.frame_ready:
with self.camera.frame_ready:
# if there isn't a frame ready for processing or it is old, wait for a new frame
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
self.frame_ready.wait()
if self.camera.frame_time.value == frame_time or (now - self.camera.frame_time.value) > 0.5:
self.camera.frame_ready.wait()
# make a copy of the cropped frame
with self.frame_lock:
cropped_frame = self.shared_frame[self.region_y_offset:self.region_y_offset+self.region_size, self.region_x_offset:self.region_x_offset+self.region_size].copy()
frame_time = self.frame_time.value
# make a copy of the frame_time
frame_time = self.camera.frame_time.value
# grab the current tracked objects
with self.camera.object_tracker.tracked_objects_lock:
tracked_objects = copy.deepcopy(self.camera.object_tracker.tracked_objects).values()
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[frame_time] = len(self.camera.config['regions'])
self.camera.regions_in_process[frame_time] += len(tracked_objects)
for index, region in enumerate(self.camera.config['regions']):
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': index,
'size': region['size'],
'x_offset': region['x_offset'],
'y_offset': region['y_offset']
})
# request a region for tracked objects
for tracked_object in tracked_objects:
box = tracked_object['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
class RegionPrepper(threading.Thread):
def __init__(self, camera, frame_cache, resize_request_queue, prepped_frame_queue):
threading.Thread.__init__(self)
self.camera = camera
self.frame_cache = frame_cache
self.resize_request_queue = resize_request_queue
self.prepped_frame_queue = prepped_frame_queue
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
resize_request = self.resize_request_queue.get()
# if the queue is over 100 items long, only prep dynamic regions
if resize_request['region_id'] != -1 and self.prepped_frame_queue.qsize() > 100:
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
frame = self.frame_cache.get(resize_request['frame_time'], None)
if frame is None:
print("RegionPrepper: frame_time not in frame_cache")
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
# make a copy of the region
cropped_frame = frame[resize_request['y_offset']:resize_request['y_offset']+resize_request['size'], resize_request['x_offset']:resize_request['x_offset']+resize_request['size']].copy()
# Resize to 300x300 if needed
if cropped_frame.shape != (300, 300, 3):
# TODO: use Pillow-SIMD?
cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
frame_expanded = np.expand_dims(cropped_frame, axis=0)
# add the frame to the queue
if not self.prepped_frame_queue.full():
self.prepped_frame_queue.put({
'camera_name': self.camera_name,
'frame_time': frame_time,
'frame': frame_expanded.flatten().copy(),
'region_size': self.region_size,
'region_threshold': self.region_threshold,
'region_x_offset': self.region_x_offset,
'region_y_offset': self.region_y_offset
})
else:
print("queue full. moving on")
resize_request['frame'] = frame_expanded.flatten().copy()
self.prepped_frame_queue.put(resize_request)
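The crop/resize/flatten performed by `RegionPrepper` can be sketched standalone; the frame below is a zeroed stand-in and the region matches one from the example config:
```python
# Hedged sketch of the region prep: crop, resize to the model's 300x300
# input, and flatten for the EdgeTPU API. Frame data is a stand-in.
import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), np.uint8)  # stand-in for a cached frame
size, x_offset, y_offset = 400, 350, 250    # a region from the example config

cropped = frame[y_offset:y_offset + size, x_offset:x_offset + size].copy()
if cropped.shape != (300, 300, 3):
    cropped = cv2.resize(cropped, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)

# model expects shape [1, 300, 300, 3], flattened for detect_with_input_tensor
tensor = np.expand_dims(cropped, axis=0).flatten()
print(tensor.shape)  # (270000,)
```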

View File: frigate/objects.py

@@ -2,91 +2,417 @@ import time
import datetime
import threading
import cv2
from . util import draw_box_with_label
import prctl
import itertools
import copy
import numpy as np
import multiprocessing as mp
from collections import defaultdict
from scipy.spatial import distance as dist
from frigate.util import draw_box_with_label, LABELS, compute_intersection_rectangle, compute_intersection_over_union, calculate_region
class ObjectCleaner(threading.Thread):
def __init__(self, objects_parsed, detected_objects):
def __init__(self, camera):
threading.Thread.__init__(self)
self._objects_parsed = objects_parsed
self._detected_objects = detected_objects
self.camera = camera
def run(self):
prctl.set_name("ObjectCleaner")
while True:
# wait a bit before checking for expired frames
time.sleep(0.2)
# expire the objects that are more than 1 second old
now = datetime.datetime.now().timestamp()
# look for the first object found within the last second
# (newest objects are appended to the end)
detected_objects = self._detected_objects.copy()
for frame_time in list(self.camera.detected_objects.keys()).copy():
if not frame_time in self.camera.frame_cache:
del self.camera.detected_objects[frame_time]
objects_deregistered = False
with self.camera.object_tracker.tracked_objects_lock:
now = datetime.datetime.now().timestamp()
for id, obj in list(self.camera.object_tracker.tracked_objects.items()):
# if the object is more than 10 seconds old
# and not in the most recent frame, deregister
if (now - obj['frame_time']) > 10 and self.camera.object_tracker.most_recent_frame_time > obj['frame_time']:
self.camera.object_tracker.deregister(id)
objects_deregistered = True
if objects_deregistered:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
num_to_delete = 0
for obj in detected_objects:
if now-obj['frame_time']<2:
break
num_to_delete += 1
if num_to_delete > 0:
del self._detected_objects[:num_to_delete]
# notify that parsed objects were changed
with self._objects_parsed:
self._objects_parsed.notify_all()
# Maintains the frame and person with the highest score from the most recent
# motion event
class BestPersonFrame(threading.Thread):
def __init__(self, objects_parsed, recent_frames, detected_objects):
class DetectedObjectsProcessor(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.objects_parsed = objects_parsed
self.recent_frames = recent_frames
self.detected_objects = detected_objects
self.best_person = None
self.best_frame = None
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame = self.camera.detected_objects_queue.get()
# wait until objects have been parsed
with self.objects_parsed:
self.objects_parsed.wait()
objects = frame['detected_objects']
# make a copy of detected objects
detected_objects = self.detected_objects.copy()
detected_people = [obj for obj in detected_objects if obj['name'] == 'person']
for raw_obj in objects:
name = str(LABELS[raw_obj.label_id])
# get the highest scoring person
new_best_person = max(detected_people, key=lambda x:x['score'], default=self.best_person)
if not name in self.camera.objects_to_track:
continue
# if there isn't a person, continue
if new_best_person is None:
obj = {
'name': name,
'score': float(raw_obj.score),
'box': {
'xmin': int((raw_obj.bounding_box[0][0] * frame['size']) + frame['x_offset']),
'ymin': int((raw_obj.bounding_box[0][1] * frame['size']) + frame['y_offset']),
'xmax': int((raw_obj.bounding_box[1][0] * frame['size']) + frame['x_offset']),
'ymax': int((raw_obj.bounding_box[1][1] * frame['size']) + frame['y_offset'])
},
'region': {
'xmin': frame['x_offset'],
'ymin': frame['y_offset'],
'xmax': frame['x_offset']+frame['size'],
'ymax': frame['y_offset']+frame['size']
},
'frame_time': frame['frame_time'],
'region_id': frame['region_id']
}
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
obj['clipped'] = False
if ((obj['region']['xmin'] > 5 and obj['box']['xmin']-obj['region']['xmin'] <= 5) or
(obj['region']['ymin'] > 5 and obj['box']['ymin']-obj['region']['ymin'] <= 5) or
(self.camera.frame_shape[1]-obj['region']['xmax'] > 5 and obj['region']['xmax']-obj['box']['xmax'] <= 5) or
(self.camera.frame_shape[0]-obj['region']['ymax'] > 5 and obj['region']['ymax']-obj['box']['ymax'] <= 5)):
obj['clipped'] = True
# Compute the area
# TODO: +1 right?
obj['area'] = (obj['box']['xmax']-obj['box']['xmin'])*(obj['box']['ymax']-obj['box']['ymin'])
self.camera.detected_objects[frame['frame_time']].append(obj)
# TODO: use in_process and processed counts instead to avoid lock
with self.camera.regions_in_process_lock:
if frame['frame_time'] in self.camera.regions_in_process:
self.camera.regions_in_process[frame['frame_time']] -= 1
# print(f"{frame['frame_time']} remaining regions {self.camera.regions_in_process[frame['frame_time']]}")
if self.camera.regions_in_process[frame['frame_time']] == 0:
del self.camera.regions_in_process[frame['frame_time']]
# print(f"{frame['frame_time']} no remaining regions")
self.camera.finished_frame_queue.put(frame['frame_time'])
else:
self.camera.finished_frame_queue.put(frame['frame_time'])
# Thread that checks finished frames for clipped objects and sends back
# for processing if needed
# TODO: evaluate whether or not I really need separate threads/queues for each step
# given that only 1 thread will really be able to run at a time. You need a
# separate process to actually do things in parallel when you are CPU bound.
# Threads are good when you are waiting and could be processing while you wait.
class RegionRefiner(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.finished_frame_queue.get()
detected_objects = self.camera.detected_objects[frame_time].copy()
# print(f"{frame_time} finished")
# group by name
detected_object_groups = defaultdict(lambda: [])
for obj in detected_objects:
detected_object_groups[obj['name']].append(obj)
look_again = False
selected_objects = []
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
for o in group]
confidences = [o['score'] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for index in idxs:
obj = group[index[0]]
selected_objects.append(obj)
if obj['clipped']:
box = obj['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
# print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
with self.camera.regions_in_process_lock:
if not frame_time in self.camera.regions_in_process:
self.camera.regions_in_process[frame_time] = 1
else:
self.camera.regions_in_process[frame_time] += 1
# add it to the queue
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
self.camera.dynamic_region_fps.update()
look_again = True
# if we are looking again, then this frame is not ready for processing
if look_again:
# remove the clipped objects
self.camera.detected_objects[frame_time] = [o for o in selected_objects if not o['clipped']]
continue
# if there is no current best_person
if self.best_person is None:
self.best_person = new_best_person
# if there is already a best_person
else:
now = datetime.datetime.now().timestamp()
# if the new best person is a higher score than the current best person
# or the current person is more than 1 minute old, use the new best person
if new_best_person['score'] > self.best_person['score'] or (now - self.best_person['frame_time']) > 60:
self.best_person = new_best_person
# make a copy of the recent frames
recent_frames = self.recent_frames.copy()
if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
best_frame = recent_frames[self.best_person['frame_time']]
# filter objects based on camera settings
selected_objects = [o for o in selected_objects if not self.filtered(o)]
label = "{}: {}% {}".format(self.best_person['name'],int(self.best_person['score']*100),int(self.best_person['area']))
draw_box_with_label(best_frame, self.best_person['xmin'], self.best_person['ymin'],
self.best_person['xmax'], self.best_person['ymax'], label)
# print a timestamp
time_to_show = datetime.datetime.fromtimestamp(self.best_person['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
self.camera.detected_objects[frame_time] = selected_objects
# print(f"{frame_time} is actually finished")
# keep adding frames to the refined queue as long as they are finished
with self.camera.regions_in_process_lock:
while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
self.camera.last_processed_frame = self.camera.frame_queue.get()
self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
def filtered(self, obj):
object_name = obj['name']
if object_name in self.camera.object_filters:
obj_settings = self.camera.object_filters[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.get('min_area',-1) > obj['area']:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.get('max_area', self.camera.frame_shape[0]*self.camera.frame_shape[1]) < obj['area']:
return True
# if the score is lower than the threshold, skip
if obj_settings.get('threshold', 0) > obj['score']:
return True
# compute the coordinates of the object and make sure
# the location isn't outside the bounds of the image (can happen from rounding)
y_location = min(int(obj['box']['ymax']), len(self.camera.mask)-1)
x_location = min(int((obj['box']['xmax']-obj['box']['xmin'])/2.0)+obj['box']['xmin'], len(self.camera.mask[0])-1)
# if the object is in a masked location, don't add it to detected objects
if self.camera.mask[y_location][x_location] == [0]:
return True
return False
def has_overlap(self, new_obj, obj, overlap=.7):
# compute intersection rectangle with existing object and new objects region
existing_obj_current_region = compute_intersection_rectangle(obj['box'], new_obj['region'])
# compute intersection rectangle with new object and existing objects region
new_obj_existing_region = compute_intersection_rectangle(new_obj['box'], obj['region'])
# compute iou for the two intersection rectangles that were just computed
iou = compute_intersection_over_union(existing_obj_current_region, new_obj_existing_region)
# if intersection is greater than overlap
if iou > overlap:
return True
else:
return False
def find_group(self, new_obj, groups):
for index, group in enumerate(groups):
for obj in group:
if self.has_overlap(new_obj, obj):
return index
return None
class ObjectTracker(threading.Thread):
def __init__(self, camera, max_disappeared):
threading.Thread.__init__(self)
self.camera = camera
self.tracked_objects = {}
self.tracked_objects_lock = mp.Lock()
self.most_recent_frame_time = None
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.refined_frame_queue.get()
with self.tracked_objects_lock:
self.match_and_update(self.camera.detected_objects[frame_time])
self.most_recent_frame_time = frame_time
self.camera.frame_output_queue.put((frame_time, copy.deepcopy(self.tracked_objects)))
if len(self.tracked_objects) > 0:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
def register(self, index, obj):
id = "{}-{}".format(str(obj['frame_time']), index)
obj['id'] = id
obj['top_score'] = obj['score']
self.add_history(obj)
self.tracked_objects[id] = obj
def deregister(self, id):
del self.tracked_objects[id]
def update(self, id, new_obj):
self.tracked_objects[id].update(new_obj)
self.add_history(self.tracked_objects[id])
if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
self.tracked_objects[id]['top_score'] = self.tracked_objects[id]['score']
def add_history(self, obj):
entry = {
'score': obj['score'],
'box': obj['box'],
'region': obj['region'],
'centroid': obj['centroid'],
'frame_time': obj['frame_time']
}
if 'history' in obj:
obj['history'].append(entry)
else:
obj['history'] = [entry]
def match_and_update(self, new_objects):
if len(new_objects) == 0:
return
# group by name
new_object_groups = defaultdict(lambda: [])
for obj in new_objects:
new_object_groups[obj['name']].append(obj)
# track objects for each label type
for label, group in new_object_groups.items():
current_objects = [o for o in self.tracked_objects.values() if o['name'] == label]
current_ids = [o['id'] for o in current_objects]
current_centroids = np.array([o['centroid'] for o in current_objects])
# compute centroids of new objects
for obj in group:
centroid_x = int((obj['box']['xmin']+obj['box']['xmax']) / 2.0)
centroid_y = int((obj['box']['ymin']+obj['box']['ymax']) / 2.0)
obj['centroid'] = (centroid_x, centroid_y)
if len(current_objects) == 0:
for index, obj in enumerate(group):
self.register(index, obj)
return
new_centroids = np.array([o['centroid'] for o in group])
# compute the distance between each pair of tracked
# centroids and new centroids, respectively -- our
# goal will be to match each new centroid to an existing
# object centroid
D = dist.cdist(current_centroids, new_centroids)
# in order to perform this matching we must (1) find the
# smallest value in each row and then (2) sort the row
# indexes based on their minimum values so that the row
# with the smallest value is at the *front* of the index
# list
rows = D.min(axis=1).argsort()
# next, we perform a similar process on the columns by
# finding the smallest value in each column and then
# sorting using the previously computed row index list
cols = D.argmin(axis=1)[rows]
# in order to determine if we need to update, register,
# or deregister an object we need to keep track of which
# of the rows and column indexes we have already examined
usedRows = set()
usedCols = set()
# loop over the combination of the (row, column) index
# tuples
for (row, col) in zip(rows, cols):
# if we have already examined either the row or
# column value before, ignore it
if row in usedRows or col in usedCols:
continue
# otherwise, grab the object ID for the current row,
# set its new centroid, and reset the disappeared
# counter
objectID = current_ids[row]
self.update(objectID, group[col])
# indicate that we have examined each of the row and
# column indexes, respectively
usedRows.add(row)
usedCols.add(col)
# compute the column index we have NOT yet examined
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# if the number of input centroids is greater
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
# if D.shape[0] < D.shape[1]:
# TODO: rather than assuming these are new objects, we could
# look to see if any of the remaining boxes have a large amount
# of overlap...
for col in unusedCols:
self.register(col, group[col])
# Maintains the frame and object with the highest score
class BestFrames(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
self.best_objects = {}
self.best_frames = {}
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# make a copy of tracked objects
tracked_objects = list(self.camera.object_tracker.tracked_objects.values())
for obj in tracked_objects:
if obj['name'] in self.best_objects:
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is more than 1 minute old, use the new object
if obj['score'] > self.best_objects[obj['name']]['score'] or (now - self.best_objects[obj['name']]['frame_time']) > 60:
self.best_objects[obj['name']] = copy.deepcopy(obj)
else:
self.best_objects[obj['name']] = copy.deepcopy(obj)
for name, obj in self.best_objects.items():
if obj['frame_time'] in self.camera.frame_cache:
best_frame = self.camera.frame_cache[obj['frame_time']]
draw_box_with_label(best_frame, obj['box']['xmin'], obj['box']['ymin'],
obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']))
# print a timestamp
if self.camera.snapshot_config['show_timestamp']:
time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
self.best_frames[name] = best_frame
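The centroid-matching step in `match_and_update` can be illustrated with concrete numbers; the centroids below are invented:
```python
# Hedged sketch of the greedy centroid matching used in match_and_update.
import numpy as np
from scipy.spatial import distance as dist

current_centroids = np.array([(100, 120), (400, 300)])  # tracked objects
new_centroids = np.array([(105, 118), (600, 310), (398, 305)])  # detections

D = dist.cdist(current_centroids, new_centroids)
rows = D.min(axis=1).argsort()   # tracked objects, nearest match first
cols = D.argmin(axis=1)[rows]    # their closest new detection

used_rows, used_cols = set(), set()
for row, col in zip(rows, cols):
    if row in used_rows or col in used_cols:
        continue
    print("tracked {} -> detection {}".format(row, col))
    used_rows.add(row)
    used_cols.add(col)

# any detection column not matched would be registered as a new object
unused_cols = set(range(D.shape[1])) - used_cols
print("new objects:", unused_cols)  # {1}: the detection at (600, 310)
```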

View File: frigate/util.py

@@ -1,26 +1,161 @@
import datetime
import collections
import numpy as np
import cv2
import threading
import matplotlib.pyplot as plt
# Function to read labels from text files.
def ReadLabelFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def calculate_region(frame_shape, xmin, ymin, xmax, ymax):
# size is double the longest edge of the bounding box
size = int(max(xmax-xmin, ymax-ymin)*2)
# if the size is too big to fit in the frame
if size > min(frame_shape[0], frame_shape[1]):
size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
# if outside the image
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
x_offset = (frame_shape[1]-size)
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
# if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
y_offset = (frame_shape[0]-size)
return (size, x_offset, y_offset)
def compute_intersection_rectangle(box_a, box_b):
return {
'xmin': max(box_a['xmin'], box_b['xmin']),
'ymin': max(box_a['ymin'], box_b['ymin']),
'xmax': min(box_a['xmax'], box_b['xmax']),
'ymax': min(box_a['ymax'], box_b['ymax'])
}
def compute_intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = compute_intersection_rectangle(box_a, box_b)
# compute the area of intersection rectangle
inter_area = max(0, intersect['xmax'] - intersect['xmin'] + 1) * max(0, intersect['ymax'] - intersect['ymin'] + 1)
if inter_area == 0:
return 0.0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a['xmax'] - box_a['xmin'] + 1) * (box_a['ymax'] - box_a['ymin'] + 1)
box_b_area = (box_b['xmax'] - box_b['xmin'] + 1) * (box_b['ymax'] - box_b['ymin'] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = inter_area / float(box_a_area + box_b_area - inter_area)
# return the intersection over union value
return iou
# convert shared memory array into numpy array
def tonumpyarray(mp_arr):
return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
color = (255,0,0)
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
if color is None:
color = COLOR_MAP[label]
display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min),
(x_max, y_max),
color, 2)
color, thickness)
font_scale = 0.5
font = cv2.FONT_HERSHEY_SIMPLEX
# get the width and height of the text box
size = cv2.getTextSize(label, font, fontScale=font_scale, thickness=2)
size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
text_width = size[0][0]
text_height = size[0][1]
line_height = text_height + size[1]
# set the text start position
text_offset_x = x_min
text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
if position == 'ul':
text_offset_x = x_min
text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
elif position == 'ur':
text_offset_x = x_max - (text_width+8)
text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
elif position == 'bl':
text_offset_x = x_min
text_offset_y = y_max
elif position == 'br':
text_offset_x = x_max - (text_width+8)
text_offset_y = y_max
# make the coords of the box with a small padding of two pixels
textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
cv2.putText(frame, label, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/label_map.pbtext'
LABELS = ReadLabelFile(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class QueueMerger():
def __init__(self, from_queues, to_queue):
self.from_queues = from_queues
self.to_queue = to_queue
self.merge_threads = []
def start(self):
for from_q in self.from_queues:
self.merge_threads.append(QueueTransfer(from_q,self.to_queue))
class QueueTransfer(threading.Thread):
def __init__(self, from_queue, to_queue):
threading.Thread.__init__(self)
self.from_queue = from_queue
self.to_queue = to_queue
def run(self):
while True:
self.to_queue.put(self.from_queue.get())
class EventsPerSecond:
def __init__(self, max_events=1000):
self._start = None
self._max_events = max_events
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
self._timestamps.append(datetime.datetime.now().timestamp())
# truncate the list when it goes 100 over max_events
if len(self._timestamps) > self._max_events+100:
self._timestamps = self._timestamps[(1-self._max_events):]
def eps(self, last_n_seconds=10):
# compute the (approximate) events in the last n seconds
now = datetime.datetime.now().timestamp()
seconds = min(now-self._start, last_n_seconds)
return len([t for t in self._timestamps if t > (now-last_n_seconds)]) / seconds
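A short sketch exercising the geometry helpers above. It assumes it runs inside the container, since importing `frigate.util` reads `/label_map.pbtext` at module load; the frame shape and boxes are invented:
```python
# Hedged sketch: exercise calculate_region and the IoU helper.
from frigate.util import calculate_region, compute_intersection_over_union

frame_shape = (720, 1280, 3)  # invented (height, width, channels)

# a 110x140 box gets a square region double its longest edge, clamped to the frame
print(calculate_region(frame_shape, 500, 300, 610, 440))  # (280, 415, 230)

box_a = {'xmin': 0, 'ymin': 0, 'xmax': 100, 'ymax': 100}
box_b = {'xmin': 50, 'ymin': 50, 'xmax': 150, 'ymax': 150}
print(compute_intersection_over_union(box_a, box_b))  # ~0.146
```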

View File: frigate/video.py

@@ -2,53 +2,70 @@ import os
import time
import datetime
import cv2
import queue
import threading
import ctypes
import multiprocessing as mp
import subprocess as sp
import numpy as np
from . util import tonumpyarray, draw_box_with_label
from . object_detection import FramePrepper
from . objects import ObjectCleaner, BestPersonFrame
from . mqtt import MqttObjectPublisher
import prctl
import copy
import itertools
import json
from collections import defaultdict
from frigate.util import tonumpyarray, LABELS, draw_box_with_label, calculate_region, EventsPerSecond
from frigate.object_detection import RegionPrepper, RegionRequester
from frigate.objects import ObjectCleaner, BestFrames, DetectedObjectsProcessor, RegionRefiner, ObjectTracker
from frigate.mqtt import MqttObjectPublisher
# Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
# Stores recent frames so they can be used by other threads
class FrameTracker(threading.Thread):
def __init__(self, shared_frame, frame_time, frame_ready, frame_lock, recent_frames):
def __init__(self, frame_time, frame_ready, frame_lock, recent_frames):
threading.Thread.__init__(self)
self.shared_frame = shared_frame
self.frame_time = frame_time
self.frame_ready = frame_ready
self.frame_lock = frame_lock
self.recent_frames = recent_frames
def run(self):
frame_time = 0.0
prctl.set_name(self.__class__.__name__)
while True:
now = datetime.datetime.now().timestamp()
# wait for a frame
with self.frame_ready:
# if there isn't a frame ready for processing or it is old, wait for a signal
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
self.frame_ready.wait()
# lock and make a copy of the frame
with self.frame_lock:
frame = self.shared_frame.copy()
frame_time = self.frame_time.value
# add the frame to recent frames
self.recent_frames[frame_time] = frame
self.frame_ready.wait()
# delete any old frames
stored_frame_times = list(self.recent_frames.keys())
for k in stored_frame_times:
if (now - k) > 2:
stored_frame_times.sort(reverse=True)
if len(stored_frame_times) > 100:
frames_to_delete = stored_frame_times[50:]
for k in frames_to_delete:
del self.recent_frames[k]
def get_frame_shape(source):
# capture a single frame and check the frame shape so the correct array
# size can be allocated in memory
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'panic',
'-show_error',
'-show_streams',
'-of',
'json',
'"'+source+'"'
])
print(ffprobe_cmd)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
info = json.loads(output)
print(info)
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
if video_info['height'] != 0 and video_info['width'] != 0:
return (video_info['height'], video_info['width'], 3)
# fallback to using opencv if ffprobe didn't succeed
video = cv2.VideoCapture(source)
ret, frame = video.read()
frame_shape = frame.shape
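For context, a sketch of the abridged JSON shape `get_frame_shape` expects back from ffprobe; values are invented:
```python
# Hedged sketch: abridged shape of the ffprobe JSON parsed above.
# Real output carries many more fields per stream.
info = {
    'streams': [
        {'codec_type': 'audio'},
        {'codec_type': 'video', 'height': 720, 'width': 1280},
    ]
}
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
print((video_info['height'], video_info['width'], 3))  # (720, 1280, 3)
```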
@@ -65,13 +82,13 @@ class CameraWatchdog(threading.Thread):
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait a bit before checking
time.sleep(10)
if (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > 10:
print("last frame is more than 10 seconds old, restarting camera capture...")
if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > self.camera.watchdog_timeout:
print(self.camera.name + ": last frame is more than 5 minutes old, restarting camera capture...")
self.camera.start_or_restart_capture()
time.sleep(5)
@@ -82,16 +99,17 @@ class CameraCapture(threading.Thread):
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
frame_num = 0
while True:
if self.camera.ffmpeg_process.poll() != None:
print("ffmpeg process is not running. exiting capture thread...")
print(self.camera.name + ": ffmpeg process is not running. exiting capture thread...")
break
raw_image = self.camera.ffmpeg_process.stdout.read(self.camera.frame_size)
if len(raw_image) == 0:
print("ffmpeg didnt return a frame. something is wrong. exiting capture thread...")
print(self.camera.name + ": ffmpeg didnt return a frame. something is wrong. exiting capture thread...")
break
frame_num += 1
@@ -99,23 +117,51 @@ class CameraCapture(threading.Thread):
continue
with self.camera.frame_lock:
# TODO: use frame_queue instead
self.camera.frame_time.value = datetime.datetime.now().timestamp()
self.camera.current_frame[:] = (
self.camera.frame_cache[self.camera.frame_time.value] = (
np
.frombuffer(raw_image, np.uint8)
.reshape(self.camera.frame_shape)
)
self.camera.frame_queue.put(self.camera.frame_time.value)
# Notify with the condition that a new frame is ready
with self.camera.frame_ready:
self.camera.frame_ready.notify_all()
self.camera.fps.update()
class VideoWriter(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
(frame_time, tracked_objects) = self.camera.frame_output_queue.get()
# if len(tracked_objects) == 0:
# continue
# f = open(f"/debug/output/{self.camera.name}-{str(format(frame_time, '.8f'))}.jpg", 'wb')
# f.write(self.camera.frame_with_objects(frame_time, tracked_objects))
# f.close()
class Camera:
def __init__(self, name, ffmpeg_config, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
def __init__(self, name, ffmpeg_config, global_objects_config, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
self.name = name
self.config = config
self.detected_objects = []
self.recent_frames = {}
self.detected_objects = defaultdict(lambda: [])
self.frame_cache = {}
self.last_processed_frame = None
# queue for re-assembling frames in order
self.frame_queue = queue.Queue()
# track how many regions have been requested for a frame so we know when a frame is complete
self.regions_in_process = {}
# Lock to control access
self.regions_in_process_lock = mp.Lock()
self.finished_frame_queue = queue.Queue()
self.refined_frame_queue = queue.Queue()
self.frame_output_queue = queue.Queue()
self.ffmpeg = config.get('ffmpeg', {})
self.ffmpeg_input = get_ffmpeg_input(self.ffmpeg['input'])
@@ -124,67 +170,104 @@ class Camera:
self.ffmpeg_input_args = self.ffmpeg.get('input_args', ffmpeg_config['input_args'])
self.ffmpeg_output_args = self.ffmpeg.get('output_args', ffmpeg_config['output_args'])
camera_objects_config = config.get('objects', {})
self.take_frame = self.config.get('take_frame', 1)
self.watchdog_timeout = self.config.get('watchdog_timeout', 300)
self.snapshot_config = {
'show_timestamp': self.config.get('snapshots', {}).get('show_timestamp', True)
}
self.regions = self.config['regions']
if 'width' in self.config and 'height' in self.config:
self.frame_shape = (self.config['height'], self.config['width'], 3)
else:
self.frame_shape = get_frame_shape(self.ffmpeg_input)
self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
self.mqtt_client = mqtt_client
self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
# create a numpy array for the current frame, initialized to zeros
self.current_frame = np.zeros(self.frame_shape, np.uint8)
# create shared value for storing the frame_time
self.frame_time = mp.Value('d', 0.0)
# Lock to control access to the frame
self.frame_lock = mp.Lock()
# Condition for notifying that a new frame is ready
self.frame_ready = mp.Condition()
# Condition for notifying that objects were parsed
self.objects_parsed = mp.Condition()
# Condition for notifying that objects were tracked
self.objects_tracked = mp.Condition()
# Queue for region resize requests; allowed to grow as needed
self.resize_queue = queue.Queue()
# Queue for raw detected objects
self.detected_objects_queue = queue.Queue()
self.detected_objects_processor = DetectedObjectsProcessor(self)
self.detected_objects_processor.start()
# cache for the most recently rendered frame with objects (jpg bytes)
self.cached_frame_with_objects = {
'frame_bytes': [],
'frame_time': 0
}
self.ffmpeg_process = None
self.capture_thread = None
self.fps = EventsPerSecond()
self.skipped_region_tracker = EventsPerSecond()
# for each region, create a separate thread to resize the region and prep for detection
self.detection_prep_threads = []
for region in self.config['regions']:
# set a default threshold of 0.5 if not defined
if 'threshold' not in region:
region['threshold'] = 0.5
if not isinstance(region['threshold'], float):
print('Threshold is not a float. Defaulting to 0.5.')
region['threshold'] = 0.5
self.detection_prep_threads.append(FramePrepper(
self.name,
self.current_frame,
self.frame_time,
self.frame_ready,
self.frame_lock,
region['size'], region['x_offset'], region['y_offset'], region['threshold'],
prepped_frame_queue
))
# combine the global and camera-level lists of object types to track
self.objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
# merge object filters
global_object_filters = global_objects_config.get('filters', {})
camera_object_filters = camera_objects_config.get('filters', {})
objects_with_config = set().union(global_object_filters.keys(), camera_object_filters.keys())
self.object_filters = {}
for obj in objects_with_config:
self.object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
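The {**global, **camera} merge gives camera-level filters key-by-key precedence over the global defaults. A quick worked example with hypothetical values:

global_filters = {'person': {'min_area': 1000, 'threshold': 0.5}}
camera_filters = {'person': {'threshold': 0.7}, 'car': {'min_area': 2000}}

object_filters = {}
for obj in set(global_filters) | set(camera_filters):
    object_filters[obj] = {**global_filters.get(obj, {}), **camera_filters.get(obj, {})}

# object_filters == {
#     'person': {'min_area': 1000, 'threshold': 0.7},  # camera overrides threshold
#     'car': {'min_area': 2000},                       # camera-only filter kept
# }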
# start a thread to track objects
self.object_tracker = ObjectTracker(self, 10)
self.object_tracker.start()
# start a thread to write tracked frames to disk
self.video_writer = VideoWriter(self)
self.video_writer.start()
# start a thread to queue resize requests for regions
self.region_requester = RegionRequester(self)
self.region_requester.start()
# start a thread to cache recent frames for processing
self.frame_tracker = FrameTracker(self.frame_time,
self.frame_ready, self.frame_lock, self.frame_cache)
self.frame_tracker.start()
# start a thread to resize regions
self.region_prepper = RegionPrepper(self, self.frame_cache, self.resize_queue, prepped_frame_queue)
self.region_prepper.start()
# start a thread to store the highest scoring recent frames for monitored object types
self.best_frames = BestFrames(self)
self.best_frames.start()
# start a thread to expire objects from the detected objects list
self.object_cleaner = ObjectCleaner(self)
self.object_cleaner.start()
# start a thread to refine regions when objects are clipped
self.dynamic_region_fps = EventsPerSecond()
self.region_refiner = RegionRefiner(self)
self.region_refiner.start()
self.dynamic_region_fps.start()
# start a thread to publish object scores
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self)
mqtt_publisher.start()
# create a watchdog thread for the capture process
self.watchdog = CameraWatchdog(self)
# load in the mask for object detection
if 'mask' in self.config:
self.mask = cv2.imread("/config/{}".format(self.config['mask']), cv2.IMREAD_GRAYSCALE)
else:
@@ -220,6 +303,8 @@ class Camera:
self.capture_thread = CameraCapture(self)
print("Starting a new capture thread...")
self.capture_thread.start()
self.fps.start()
self.skipped_region_tracker.start()
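fps and skipped_region_tracker appear to be EventsPerSecond counters: update() records an event and eps(window) reports the rate over the last window seconds. A minimal sketch under that assumption; the project's real implementation may differ:

import datetime

class EventsPerSecond:
    def __init__(self):
        self._timestamps = []

    def start(self):
        self._timestamps = []

    def update(self):
        self._timestamps.append(datetime.datetime.now().timestamp())

    def eps(self, last_n_seconds=10):
        now = datetime.datetime.now().timestamp()
        # keep only events inside the window, then average over it
        self._timestamps = [t for t in self._timestamps if t > now - last_n_seconds]
        return len(self._timestamps) / last_n_seconds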
def start_ffmpeg(self):
ffmpeg_cmd = (['ffmpeg'] +
@@ -236,9 +321,6 @@ class Camera:
def start(self):
self.start_or_restart_capture()
# start the object detection prep threads
for detection_prep_thread in self.detection_prep_threads:
detection_prep_thread.start()
self.watchdog.start()
def join(self):
@@ -247,81 +329,76 @@ class Camera:
def get_capture_pid(self):
return self.ffmpeg_process.pid
def get_best(self, label):
return self.best_frames.best_frames.get(label)
def add_objects(self, objects):
if len(objects) == 0:
return
for obj in objects:
# Store object area to use in bounding box labels
obj['area'] = (obj['xmax']-obj['xmin'])*(obj['ymax']-obj['ymin'])
if obj['name'] == 'person':
# find the matching region
region = None
for r in self.regions:
if (
obj['xmin'] >= r['x_offset'] and
obj['ymin'] >= r['y_offset'] and
obj['xmax'] <= r['x_offset']+r['size'] and
obj['ymax'] <= r['y_offset']+r['size']
):
region = r
break
# if the min person area is larger than the
# detected person, don't add it to detected objects
if region and 'min_person_area' in region and region['min_person_area'] > obj['area']:
continue
# if the detected person is larger than the
# max person area, don't add it to detected objects
if region and 'max_person_area' in region and region['max_person_area'] < obj['area']:
continue
# compute the coordinates of the person and make sure
# the location isn't outside the bounds of the image (can happen from rounding)
y_location = min(int(obj['ymax']), len(self.mask)-1)
x_location = min(int((obj['xmax']-obj['xmin'])/2.0)+obj['xmin'], len(self.mask[0])-1)
# if the person is in a masked location, continue
if self.mask[y_location][x_location] == 0:
continue
self.detected_objects.append(obj)
with self.objects_parsed:
self.objects_parsed.notify_all()
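The mask test above samples a single pixel per detection: the bottom-center of the bounding box, clamped to the image bounds, is looked up in the grayscale mask, and a value of 0 means the detection is ignored. A self-contained sketch; the mask path and frame size are hypothetical:

import cv2
import numpy as np

mask = cv2.imread('/config/back-mask.bmp', cv2.IMREAD_GRAYSCALE)
if mask is None:
    # no mask configured: treat every pixel as unmasked
    mask = np.full((1080, 1920), 255, np.uint8)

def is_masked(obj):
    # bottom-center of the box, clamped so rounding can't index out of bounds
    y = min(int(obj['ymax']), mask.shape[0] - 1)
    x = min(int((obj['xmax'] - obj['xmin']) / 2.0) + obj['xmin'], mask.shape[1] - 1)
    return mask[y][x] == 0

print(is_masked({'xmin': 100, 'xmax': 200, 'ymin': 50, 'ymax': 400}))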
def stats(self):
return {
'camera_fps': self.fps.eps(60),
'resize_queue': self.resize_queue.qsize(),
'frame_queue': self.frame_queue.qsize(),
'finished_frame_queue': self.finished_frame_queue.qsize(),
'refined_frame_queue': self.refined_frame_queue.qsize(),
'regions_in_process': self.regions_in_process,
'dynamic_regions_per_sec': self.dynamic_region_fps.eps(),
'skipped_regions_per_sec': self.skipped_region_tracker.eps(60)
}
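These counters back the stats endpoint mentioned in the commit history. A hedged sketch of how per-camera stats might be exposed over HTTP; Flask and the cameras dict are illustrative assumptions, not necessarily the project's actual wiring:

from flask import Flask, jsonify

app = Flask(__name__)
cameras = {}  # assumed: camera name -> Camera, populated at startup

@app.route('/stats')
def stats():
    # each Camera.stats() returns the JSON-serializable dict above
    return jsonify({name: camera.stats() for name, camera in cameras.items()})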
def frame_with_objects(self, frame_time, tracked_objects=None):
if frame_time not in self.frame_cache:
frame = np.zeros(self.frame_shape, np.uint8)
else:
frame = self.frame_cache[frame_time].copy()
detected_objects = self.detected_objects[frame_time].copy()
for region in self.regions:
color = (255,255,255)
cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
(region['x_offset']+region['size'], region['y_offset']+region['size']),
color, 2)
# draw the bounding boxes on the screen
if tracked_objects is None:
with self.object_tracker.tracked_objects_lock:
tracked_objects = copy.deepcopy(self.object_tracker.tracked_objects)
for obj in detected_objects:
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']), thickness=3)
for id, obj in tracked_objects.items():
color = (0, 255, 0) if obj['frame_time'] == frame_time else (255, 0, 0)
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], id, color=color, thickness=1, position='bl')
# print a timestamp
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
# print fps
cv2.putText(frame, str(self.fps.eps()) + ' FPS', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
# convert to BGR
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# encode the image into a jpg
ret, jpg = cv2.imencode('.jpg', frame)
return jpg.tobytes()
def get_current_frame_with_objects(self):
frame_time = self.last_processed_frame
if frame_time == self.cached_frame_with_objects['frame_time']:
return self.cached_frame_with_objects['frame_bytes']
frame_bytes = self.frame_with_objects(frame_time)
self.cached_frame_with_objects = {
'frame_bytes': frame_bytes,
'frame_time': frame_time
}
return frame_bytes
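get_current_frame_with_objects memoizes on last_processed_frame, so repeated HTTP polls between processed frames reuse the encoded jpg instead of re-drawing and re-encoding. The same pattern in isolation, with render standing in for frame_with_objects:

def make_cached_renderer(render):
    cache = {'frame_time': 0, 'frame_bytes': b''}

    def get(frame_time):
        # only re-render when a newer frame has been processed
        if frame_time != cache['frame_time']:
            cache['frame_bytes'] = render(frame_time)
            cache['frame_time'] = frame_time
        return cache['frame_bytes']

    return get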


@@ -1,50 +0,0 @@
#!/bin/bash
set -e
CPU_ARCH=$(uname -m)
OS_VERSION=$(uname -v)
echo "CPU_ARCH ${CPU_ARCH}"
echo "OS_VERSION ${OS_VERSION}"
if [[ "${CPU_ARCH}" == "x86_64" ]]; then
echo "Recognized as Linux on x86_64."
LIBEDGETPU_SUFFIX=x86_64
HOST_GNU_TYPE=x86_64-linux-gnu
elif [[ "${CPU_ARCH}" == "armv7l" ]]; then
echo "Recognized as Linux on ARM32 platform."
LIBEDGETPU_SUFFIX=arm32
HOST_GNU_TYPE=arm-linux-gnueabihf
elif [[ "${CPU_ARCH}" == "aarch64" ]]; then
echo "Recognized as generic ARM64 platform."
LIBEDGETPU_SUFFIX=arm64
HOST_GNU_TYPE=aarch64-linux-gnu
fi
if [[ -z "${HOST_GNU_TYPE}" ]]; then
echo "Your platform is not supported."
exit 1
fi
echo "Using maximum operating frequency."
LIBEDGETPU_SRC="libedgetpu/libedgetpu_${LIBEDGETPU_SUFFIX}.so"
LIBEDGETPU_DST="/usr/lib/${HOST_GNU_TYPE}/libedgetpu.so.1.0"
# Runtime library.
echo "Installing Edge TPU runtime library [${LIBEDGETPU_DST}]..."
if [[ -f "${LIBEDGETPU_DST}" ]]; then
echo "File already exists. Replacing it..."
rm -f "${LIBEDGETPU_DST}"
fi
cp -p "${LIBEDGETPU_SRC}" "${LIBEDGETPU_DST}"
ldconfig
echo "Done."
# Python API.
if WHEEL=$(ls edgetpu-*-py3-none-any.whl 2>/dev/null); then
echo "Installing Edge TPU Python API..."
python3 -m pip install --no-deps "${WHEEL}"
echo "Done."
fi
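A hedged way to confirm the runtime installed correctly from Python: the soname comes from the destination path above, and this check is only meaningful after the script has run.

import ctypes

# should resolve via ldconfig if the copy above succeeded
ctypes.CDLL('libedgetpu.so.1.0')
print('Edge TPU runtime loaded')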


@@ -1,5 +0,0 @@
#!/bin/bash
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D986B59D
echo "deb http://deb.odroid.in/5422-s bionic main" > /etc/apt/sources.list.d/odroid.list