Compare commits


4 Commits

Author           SHA1        Message                                                        Date
blakeblackshear  a7d68a4998  increase queue size and add ability to take every nth frame   2019-04-19 08:23:07 -05:00
blakeblackshear  03e46efcdd  add back queue full message                                   2019-04-19 06:37:29 -05:00
blakeblackshear  27e39edd65  add location masking for detected objects                     2019-04-14 11:58:33 -05:00
blakeblackshear  4f829e818e  implement person filtering with min/max by y position         2019-04-14 11:28:50 -05:00
15 changed files with 579 additions and 1426 deletions

View File

@@ -1,6 +1 @@
README.md
diagram.png
.gitignore
debug
config/
*.pyc
README.md

.github/FUNDING.yml vendored (1 line changed)
View File

@@ -1 +0,0 @@
github: blakeblackshear

.gitignore vendored (2 lines changed)
View File

@@ -1,4 +1,2 @@
*.pyc
debug
.vscode
config/config.yml

View File

@@ -1,53 +1,107 @@
FROM ubuntu:18.04
LABEL maintainer "blakeb@blakeshome.com"
FROM ubuntu:16.04
ENV DEBIAN_FRONTEND=noninteractive
# Install packages for apt repo
RUN apt -qq update && apt -qq install --no-install-recommends -y \
apt-transport-https ca-certificates \
gnupg wget \
ffmpeg \
python3 \
python3-pip \
python3-dev \
python3-numpy \
# python-prctl
build-essential libcap-dev \
# pillow-simd
# zlib1g-dev libjpeg-dev \
# VAAPI drivers for Intel hardware accel
libva-drm2 libva2 i965-va-driver vainfo \
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
&& wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& apt -qq update \
&& echo "libedgetpu1-max libedgetpu/accepted-eula boolean true" | debconf-set-selections \
&& apt -qq install --no-install-recommends -y \
libedgetpu1-max \
python3-edgetpu \
&& rm -rf /var/lib/apt/lists/* \
&& (apt-get autoremove -y; apt-get autoclean -y)
# Install system packages
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y python3 \
python3-dev \
python-pil \
python-lxml \
python-tk \
build-essential \
cmake \
git \
libgtk2.0-dev \
pkg-config \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libtbb2 \
libtbb-dev \
libjpeg-dev \
libpng-dev \
libtiff-dev \
libjasper-dev \
libdc1394-22-dev \
x11-apps \
wget \
vim \
ffmpeg \
unzip \
libusb-1.0-0-dev \
python3-setuptools \
python3-numpy \
zlib1g-dev \
libgoogle-glog-dev \
swig \
libunwind-dev \
libc++-dev \
libc++abi-dev \
build-essential \
&& rm -rf /var/lib/apt/lists/*
# needs to be installed before others
RUN pip3 install -U wheel setuptools
# Install core packages
RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
RUN pip install -U pip \
numpy \
pillow \
matplotlib \
notebook \
Flask \
imutils \
paho-mqtt \
PyYAML
RUN pip3 install -U \
opencv-python-headless \
python-prctl \
Flask \
paho-mqtt \
PyYAML \
matplotlib \
scipy
# Install tensorflow models object detection
RUN GIT_SSL_NO_VERIFY=true git clone -q https://github.com/tensorflow/models /usr/local/lib/python3.5/dist-packages/tensorflow/models
RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/google/protobuf/releases/download/v3.5.1/protobuf-python-3.5.1.tar.gz
# Download & build protobuf-python
RUN cd /usr/local/src/ \
&& tar xf protobuf-python-3.5.1.tar.gz \
&& rm protobuf-python-3.5.1.tar.gz \
&& cd /usr/local/src/protobuf-3.5.1/ \
&& ./configure \
&& make \
&& make install \
&& ldconfig \
&& rm -rf /usr/local/src/protobuf-3.5.1/
# Download & build OpenCV
RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
RUN cd /usr/local/src/ \
&& unzip 4.0.1.zip \
&& rm 4.0.1.zip \
&& cd /usr/local/src/opencv-4.0.1/ \
&& mkdir build \
&& cd /usr/local/src/opencv-4.0.1/build \
&& cmake -D CMAKE_INSTALL_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local/ .. \
&& make -j4 \
&& make install \
&& rm -rf /usr/local/src/opencv-4.0.1
# Download and install EdgeTPU libraries
RUN wget -q -O edgetpu_api.tar.gz --no-check-certificate http://storage.googleapis.com/cloud-iot-edge-pretrained-models/edgetpu_api.tar.gz
RUN tar xzf edgetpu_api.tar.gz \
&& cd python-tflite-source \
&& cp -p libedgetpu/libedgetpu_x86_64.so /lib/x86_64-linux-gnu/libedgetpu.so \
&& cp edgetpu/swig/compiled_so/_edgetpu_cpp_wrapper_x86_64.so edgetpu/swig/_edgetpu_cpp_wrapper.so \
&& cp edgetpu/swig/compiled_so/edgetpu_cpp_wrapper.py edgetpu/swig/ \
&& python3 setup.py develop --user
# Minimize image size
RUN (apt-get autoremove -y; \
apt-get autoclean -y)
# symlink the model and labels
RUN wget -q https://github.com/google-coral/edgetpu/raw/master/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
RUN wget -q https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
RUN ln -s mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
RUN ln -s /coco_labels.txt /label_map.pbtext
RUN ln -s /python-tflite-source/edgetpu/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
RUN ln -s /python-tflite-source/edgetpu/test_data/coco_labels.txt /label_map.pbtext
# Set TF object detection available
ENV PYTHONPATH "$PYTHONPATH:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research/slim"
RUN cd /usr/local/lib/python3.5/dist-packages/tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=.
WORKDIR /opt/frigate/
ADD frigate frigate/
COPY detect_objects.py .
COPY benchmark.py .
CMD ["python3", "-u", "detect_objects.py"]

View File

@@ -1,7 +1,7 @@
# Frigate - Realtime Object Detection for IP Cameras
# Frigate - Realtime Object Detection for RTSP Cameras
**Note:** This version requires the use of a [Google Coral USB Accelerator](https://coral.withgoogle.com/products/accelerator/)
Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. Designed for integration with HomeAssistant or others via MQTT.
Uses OpenCV and Tensorflow to perform realtime object detection locally for RTSP cameras. Designed for integration with HomeAssistant or others via MQTT.
- Leverages multiprocessing and threads heavily with an emphasis on realtime over processing every frame
- Allows you to define specific regions (squares) in the image to look for objects
@@ -30,9 +30,8 @@ docker run --rm \
--privileged \
-v /dev/bus/usb:/dev/bus/usb \
-v <path_to_config_dir>:/config:ro \
-v /etc/localtime:/etc/localtime:ro \
-p 5000:5000 \
-e FRIGATE_RTSP_PASSWORD='password' \
-e RTSP_PASSWORD='password' \
frigate:latest
```
@@ -45,58 +44,35 @@ Example docker-compose:
image: frigate:latest
volumes:
- /dev/bus/usb:/dev/bus/usb
- /etc/localtime:/etc/localtime:ro
- <path_to_config>:/config
ports:
- "5000:5000"
environment:
FRIGATE_RTSP_PASSWORD: "password"
RTSP_PASSWORD: "password"
```
A `config.yml` file must exist in the `config` directory. See example [here](config/config.example.yml) and device specific info can be found [here](docs/DEVICES.md).
A `config.yml` file must exist in the `config` directory. See example [here](config/config.yml).
Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best person snapshot at `http://localhost:5000/<camera_name>/best_person.jpg`
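For example, the best-snapshot endpoint can be polled with any HTTP client; a quick sketch using Python's `requests` (the camera name `back` comes from the example config, and the `person` label is only illustrative):

```python
import requests

# fetch the current best person snapshot and save it to disk
resp = requests.get('http://localhost:5000/back/person/best.jpg', timeout=5)
resp.raise_for_status()
with open('best_person.jpg', 'wb') as f:
    f.write(resp.content)
```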
## Integration with HomeAssistant
```
camera:
- name: Camera Last Person
platform: mqtt
topic: frigate/<camera_name>/person/snapshot
- name: Camera Last Car
platform: mqtt
topic: frigate/<camera_name>/car/snapshot
platform: generic
still_image_url: http://<ip>:5000/<camera_name>/best_person.jpg
binary_sensor:
sensor:
- name: Camera Person
platform: mqtt
state_topic: "frigate/<camera_name>/person"
device_class: motion
state_topic: "frigate/<camera_name>/objects"
value_template: '{{ value_json.person }}'
device_class: moving
availability_topic: "frigate/available"
automation:
- alias: Alert me if a person is detected while armed away
trigger:
platform: state
entity_id: binary_sensor.camera_person
from: 'off'
to: 'on'
condition:
- condition: state
entity_id: alarm_control_panel.home_alarm
state: armed_away
action:
- service: notify.user_telegram
data:
message: "A person was detected."
data:
photo:
- url: http://<ip>:5000/<camera_name>/person/best.jpg
caption: A person was detected.
```
## Tips
- Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed
- Lower the framerate of the RTSP feed on the camera to reduce the CPU usage for capturing the feed
## Future improvements
- [x] Remove motion detection for now

View File

@@ -1,20 +0,0 @@
import statistics
import numpy as np
from edgetpu.detection.engine import DetectionEngine
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# Load the edgetpu engine and labels
engine = DetectionEngine(PATH_TO_CKPT)
frame = np.zeros((300,300,3), np.uint8)
flattened_frame = np.expand_dims(frame, axis=0).flatten()
detection_times = []
for x in range(0, 1000):
objects = engine.detect_with_input_tensor(flattened_frame, threshold=0.1, top_k=3)
detection_times.append(engine.get_inference_time())
print("Average inference time: " + str(statistics.mean(detection_times)))

View File

@@ -1,148 +0,0 @@
web_port: 5000
mqtt:
host: mqtt.server.com
topic_prefix: frigate
# client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
# user: username # Optional -- Uncomment for use
# password: password # Optional -- Uncomment for use
#################
# Default ffmpeg args. Optional and can be overwritten per camera.
# Should work with most RTSP cameras that send h264 video
# Built from the properties below with:
# "ffmpeg" + global_args + input_args + "-i" + input + output_args
#################
# ffmpeg:
# global_args:
# - -hide_banner
# - -loglevel
# - panic
# hwaccel_args: []
# input_args:
# - -avoid_negative_ts
# - make_zero
# - -fflags
# - nobuffer
# - -flags
# - low_delay
# - -strict
# - experimental
# - -fflags
# - +genpts+discardcorrupt
# - -vsync
# - drop
# - -rtsp_transport
# - tcp
# - -stimeout
# - '5000000'
# - -use_wallclock_as_timestamps
# - '1'
# output_args:
# - -vf
# - mpdecimate
# - -f
# - rawvideo
# - -pix_fmt
# - rgb24
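The comment at the top of this block describes how the final command line is assembled from these pieces; below is a minimal sketch of that concatenation (the function name is illustrative, and the placement of `hwaccel_args` is an assumption, since the comment only lists the other parts):

```python
def build_ffmpeg_cmd(ffmpeg_config, camera_input):
    # "ffmpeg" + global_args + input_args + "-i" + input + output_args,
    # as described in the comment above; hwaccel_args presumably slot in
    # before the input arguments when hardware acceleration is configured
    return (['ffmpeg']
            + ffmpeg_config['global_args']
            + ffmpeg_config.get('hwaccel_args', [])
            + ffmpeg_config['input_args']
            + ['-i', camera_input]
            + ffmpeg_config['output_args'])
```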
####################
# Global object configuration. Applies to all cameras
# unless overridden at the camera levels.
# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
# All labels from the model are reported over MQTT. These values are used to filter out false positives.
# min_area (optional): minimum width*height of the bounding box for the detected person
# max_area (optional): maximum width*height of the bounding box for the detected person
# threshold (optional): The minimum decimal percentage (50% hit = 0.5) for the confidence from tensorflow
####################
objects:
track:
- person
- car
- truck
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.5
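In practice these values act as a simple pass/fail gate on each detection. The sketch below is only an illustration (the dictionary shapes are assumptions), but the comparisons mirror the `filtered()` logic in `frigate/objects.py` further down in this diff:

```python
def passes_filters(obj, filters):
    # per-label settings; labels without an entry pass through untouched
    f = filters.get(obj['name'], {})
    if obj['area'] < f.get('min_area', 0):
        return False          # bounding box too small, likely a false positive
    if obj['area'] > f.get('max_area', float('inf')):
        return False          # bounding box implausibly large
    if obj['score'] < f.get('threshold', 0):
        return False          # model confidence below the configured minimum
    return True

# a 6000 px person at 45% confidence fails the 0.5 threshold above
print(passes_filters({'name': 'person', 'area': 6000, 'score': 0.45},
                     {'person': {'min_area': 5000, 'max_area': 100000, 'threshold': 0.5}}))
```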
cameras:
back:
ffmpeg:
################
# Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
# Environment variables that begin with 'FRIGATE_' may be referenced in {}
################
input: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
#################
# These values will override default values for just this camera
#################
# global_args: []
# hwaccel_args: []
# input_args: []
# output_args: []
################
## Optionally specify the resolution of the video feed. Frigate will try to auto detect if not specified
################
# height: 1280
# width: 720
################
## Optional mask. Must be the same dimensions as your video feed.
## The mask works by looking at the bottom center of the bounding box for the detected
## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
## false positive. In my mask, the grass and driveway visible from my backdoor camera
## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
## person to stand) are black.
################
# mask: back-mask.bmp
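A hedged sketch of the lookup described above: only the mask pixel under the bottom center of the bounding box is consulted, mirroring the masking check in `frigate/objects.py` later in this diff. The helper name and the grayscale load are illustrative assumptions:

```python
import cv2

def in_masked_area(box, mask):
    # bottom center of the bounding box: ymax vertically, midpoint horizontally
    y = min(int(box['ymax']), mask.shape[0] - 1)
    x = min(int((box['xmax'] - box['xmin']) / 2.0) + box['xmin'], mask.shape[1] - 1)
    # a black pixel (0) marks places where a detection should be ignored
    return mask[y][x] == 0

# the mask would be loaded as a single-channel image the same size as the feed, e.g.
# mask = cv2.imread('/config/back-mask.bmp', cv2.IMREAD_GRAYSCALE)
```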
################
# Allows you to limit the framerate within frigate for cameras that do not support
# custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
# 3 every 3rd frame, etc.
################
take_frame: 1
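That skip is just a modulo check on a running frame counter, matching the `frame_num % take_frame` test in `frigate/video.py` later in this diff; a tiny sketch:

```python
take_frame = 2   # look at every 2nd frame
frame_num = 0

for _ in range(6):                        # stand-in for the capture loop
    frame_num += 1
    if (frame_num % take_frame) != 0:
        continue                          # drop this frame without decoding it
    # frames 2, 4 and 6 would be decoded and handed to detection here
```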
################
# The number of seconds frigate will allow a camera to go without sending a frame before
# assuming the ffmpeg process has a problem and restarting.
################
# watchdog_timeout: 300
################
# Configuration for the snapshot sent over mqtt
################
snapshots:
show_timestamp: True
################
# Camera level object config. This config is merged with the global config above.
################
objects:
track:
- person
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.5
################
# size: size of the region in pixels
# x_offset/y_offset: position of the upper left corner of your region (top left of image is 0,0)
# Tips: All regions are resized to 300x300 before detection because the model is trained on that size.
# Resizing regions takes CPU power. Ideally, all regions should be as close to 300x300 as possible.
# Defining a region that goes outside the bounds of the image will result in errors.
################
regions:
- size: 350
x_offset: 0
y_offset: 300
- size: 400
x_offset: 350
y_offset: 250
- size: 400
x_offset: 750
y_offset: 250
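As the tips above note, every region is a square crop that ends up at the model's 300x300 input size. A rough sketch of that prep step, assuming a NumPy frame in (height, width, channels) layout as used elsewhere in this diff:

```python
import cv2
import numpy as np

def prep_region(frame, size, x_offset, y_offset):
    # crop the square region out of the full frame
    cropped = frame[y_offset:y_offset + size, x_offset:x_offset + size]
    # resize to the 300x300 input the detection model was trained on
    if cropped.shape != (300, 300, 3):
        cropped = cv2.resize(cropped, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
    # the EdgeTPU engine in this diff consumes a flattened [1, 300, 300, 3] tensor
    return np.expand_dims(cropped, axis=0).flatten()

# e.g. the first region above: a 350 px square anchored at x=0, y=300
frame = np.zeros((720, 1280, 3), np.uint8)
tensor = prep_region(frame, 350, 0, 300)
```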

config/config.yml (new file, 42 lines added)
View File

@@ -0,0 +1,42 @@
web_port: 5000
mqtt:
host: mqtt.server.com
topic_prefix: frigate
cameras:
back:
rtsp:
user: viewer
host: 10.0.10.10
port: 554
# values that begin with a "$" will be replaced with environment variable
password: $RTSP_PASSWORD
path: /cam/realmonitor?channel=1&subtype=2
regions:
- size: 350
x_offset: 0
y_offset: 300
- size: 400
x_offset: 350
y_offset: 250
- size: 400
x_offset: 750
y_offset: 250
mask: back-mask.bmp
known_sizes:
- y: 300
min: 700
max: 1800
- y: 400
min: 3000
max: 7200
- y: 500
min: 8500
max: 20400
- y: 600
min: 10000
max: 50000
- y: 700
min: 10000
max: 125000
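The `known_sizes` list ties in with the "person filtering with min/max by y position" commit: expected object sizes are anchored at a handful of y coordinates, and the values in between appear to be interpolated linearly (the step-size math in the partial `compute_sizes()` from `frigate/video.py` below points the same way). A hedged sketch of that interpolation, not the project's actual implementation:

```python
def size_limits_at(y, known_sizes):
    # sort anchors by their y coordinate, as compute_sizes() does
    points = sorted(known_sizes, key=lambda s: s['y'])
    # clamping outside the configured range is an assumption
    if y <= points[0]['y']:
        return points[0]['min'], points[0]['max']
    if y >= points[-1]['y']:
        return points[-1]['min'], points[-1]['max']
    for lower, upper in zip(points, points[1:]):
        if lower['y'] <= y <= upper['y']:
            t = (y - lower['y']) / (upper['y'] - lower['y'])
            return (lower['min'] + t * (upper['min'] - lower['min']),
                    lower['max'] + t * (upper['max'] - lower['max']))

# halfway between the y=400 and y=500 anchors above: roughly 5750 to 13800 px
print(size_limits_at(450, [{'y': 400, 'min': 3000, 'max': 7200},
                           {'y': 500, 'min': 8500, 'max': 20400}]))
```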

View File

@@ -3,12 +3,11 @@ import time
import queue
import yaml
import numpy as np
from flask import Flask, Response, make_response, jsonify
from flask import Flask, Response, make_response
import paho.mqtt.client as mqtt
from frigate.video import Camera
from frigate.object_detection import PreppedQueueProcessor
from frigate.util import EventsPerSecond
with open('/config/config.yml') as f:
CONFIG = yaml.safe_load(f)
@@ -18,32 +17,6 @@ MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')
# Set the default FFmpeg config
FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
FFMPEG_DEFAULT_CONFIG = {
'global_args': FFMPEG_CONFIG.get('global_args',
['-hide_banner','-loglevel','panic']),
'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
[]),
'input_args': FFMPEG_CONFIG.get('input_args',
['-avoid_negative_ts', 'make_zero',
'-fflags', 'nobuffer',
'-flags', 'low_delay',
'-strict', 'experimental',
'-fflags', '+genpts+discardcorrupt',
'-vsync', 'drop',
'-rtsp_transport', 'tcp',
'-stimeout', '5000000',
'-use_wallclock_as_timestamps', '1']),
'output_args': FFMPEG_CONFIG.get('output_args',
['-vf', 'mpdecimate',
'-f', 'rawvideo',
'-pix_fmt', 'rgb24'])
}
GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
WEB_PORT = CONFIG.get('web_port', 5000)
DEBUG = (CONFIG.get('debug', '0') == '1')
@@ -52,18 +25,9 @@ def main():
# connect to mqtt and setup last will
def on_connect(client, userdata, flags, rc):
print("On connect called")
if rc != 0:
if rc == 3:
print ("MQTT Server unavailable")
elif rc == 4:
print ("MQTT Bad username or password")
elif rc == 5:
print ("MQTT Not authorized")
else:
print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
# publish a message to signal that the service is running
client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
client = mqtt.Client(client_id=MQTT_CLIENT_ID)
client = mqtt.Client()
client.on_connect = on_connect
client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
if not MQTT_USER is None:
@@ -71,23 +35,19 @@ def main():
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start()
# Queue for prepped frames, max size set to number of regions * 3
prepped_frame_queue = queue.Queue()
# Queue for prepped frames, max size set to (number of cameras * 5)
max_queue_size = len(CONFIG['cameras'].items())*10
prepped_frame_queue = queue.Queue(max_queue_size)
cameras = {}
for name, config in CONFIG['cameras'].items():
cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG, config,
prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
fps_tracker = EventsPerSecond()
cameras[name] = Camera(name, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX, DEBUG)
prepped_queue_processor = PreppedQueueProcessor(
cameras,
prepped_frame_queue,
fps_tracker
prepped_frame_queue
)
prepped_queue_processor.start()
fps_tracker.start()
for name, camera in cameras.items():
camera.start()
@@ -96,60 +56,35 @@ def main():
# create a flask app that encodes frames as mjpeg on demand
app = Flask(__name__)
@app.route('/')
def ishealthy():
# return a health check message
return "Frigate is running. Alive and healthy!"
@app.route('/debug/stats')
def stats():
stats = {
'coral': {
'fps': fps_tracker.eps(),
'inference_speed': prepped_queue_processor.avg_inference_speed,
'queue_length': prepped_frame_queue.qsize()
}
}
for name, camera in cameras.items():
stats[name] = camera.stats()
return jsonify(stats)
@app.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
if camera_name in cameras:
best_frame = cameras[camera_name].get_best(label)
if best_frame is None:
best_frame = np.zeros((720,1280,3), np.uint8)
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return "Camera named {} not found".format(camera_name), 404
@app.route('/<camera_name>/best_person.jpg')
def best_person(camera_name):
best_person_frame = cameras[camera_name].get_best_person()
if best_person_frame is None:
best_person_frame = np.zeros((720,1280,3), np.uint8)
ret, jpg = cv2.imencode('.jpg', best_person_frame)
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
@app.route('/<camera_name>')
def mjpeg_feed(camera_name):
if camera_name in cameras:
# return a multipart response
return Response(imagestream(camera_name),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return "Camera named {} not found".format(camera_name), 404
# return a multipart response
return Response(imagestream(camera_name),
mimetype='multipart/x-mixed-replace; boundary=frame')
def imagestream(camera_name):
while True:
# max out at 1 FPS
time.sleep(1)
# max out at 5 FPS
time.sleep(0.2)
frame = cameras[camera_name].get_current_frame_with_objects()
# encode the image into a jpg
ret, jpg = cv2.imencode('.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
camera.join()
if __name__ == '__main__':
main()
main()

View File

@@ -1,74 +0,0 @@
# Configuration Examples
### Default (most RTSP cameras)
This is the default ffmpeg command and should work with most RTSP cameras that send h264 video
```yaml
ffmpeg:
global_args:
- -hide_banner
- -loglevel
- panic
hwaccel_args: []
input_args:
- -avoid_negative_ts
- make_zero
- -fflags
- nobuffer
- -flags
- low_delay
- -strict
- experimental
- -fflags
- +genpts+discardcorrupt
- -vsync
- drop
- -rtsp_transport
- tcp
- -stimeout
- '5000000'
- -use_wallclock_as_timestamps
- '1'
output_args:
- -vf
- mpdecimate
- -f
- rawvideo
- -pix_fmt
- rgb24
```
### RTMP Cameras
The input parameters need to be adjusted for RTMP cameras
```yaml
ffmpeg:
input_args:
- -avoid_negative_ts
- make_zero
- -fflags
- nobuffer
- -flags
- low_delay
- -strict
- experimental
- -fflags
- +genpts+discardcorrupt
- -vsync
- drop
- -use_wallclock_as_timestamps
- '1'
```
### Hardware Acceleration
Intel Quicksync
```yaml
ffmpeg:
hwaccel_args:
- -hwaccel
- vaapi
- -hwaccel_device
- /dev/dri/renderD128
- -hwaccel_output_format
- yuv420p
```

View File

@@ -1,54 +1,33 @@
import json
import cv2
import threading
import prctl
from collections import Counter, defaultdict
import itertools
class MqttObjectPublisher(threading.Thread):
def __init__(self, client, topic_prefix, camera):
def __init__(self, client, topic_prefix, objects_parsed, detected_objects):
threading.Thread.__init__(self)
self.client = client
self.topic_prefix = topic_prefix
self.camera = camera
self.objects_parsed = objects_parsed
self._detected_objects = detected_objects
def run(self):
prctl.set_name(self.__class__.__name__)
current_object_status = defaultdict(lambda: 'OFF')
last_sent_payload = ""
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# count objects with more than 2 entries in history by type
obj_counter = Counter()
for obj in self.camera.object_tracker.tracked_objects.values():
if len(obj['history']) > 1:
obj_counter[obj['name']] += 1
# report on detected objects
for obj_name, count in obj_counter.items():
new_status = 'ON' if count > 0 else 'OFF'
if new_status != current_object_status[obj_name]:
current_object_status[obj_name] = new_status
self.client.publish(self.topic_prefix+'/'+obj_name, new_status, retain=False)
# send the snapshot over mqtt if we have it as well
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)
# initialize the payload
payload = {}
# expire any objects that are ON and no longer detected
expired_objects = [obj_name for obj_name, status in current_object_status.items() if status == 'ON' and not obj_name in obj_counter]
for obj_name in expired_objects:
current_object_status[obj_name] = 'OFF'
self.client.publish(self.topic_prefix+'/'+obj_name, 'OFF', retain=False)
# send updated snapshot snapshot over mqtt if we have it as well
if obj_name in self.camera.best_frames.best_frames:
best_frame = cv2.cvtColor(self.camera.best_frames.best_frames[obj_name], cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/'+obj_name+'/snapshot', jpg_bytes, retain=True)
# wait until objects have been parsed
with self.objects_parsed:
self.objects_parsed.wait()
# add all the person scores in detected objects
detected_objects = self._detected_objects.copy()
person_score = sum([obj['score'] for obj in detected_objects if obj['name'] == 'person'])
# if the person score is more than 100, set person to ON
payload['person'] = 'ON' if int(person_score*100) > 100 else 'OFF'
# send message for objects if different
new_payload = json.dumps(payload, sort_keys=True)
if new_payload != last_sent_payload:
last_sent_payload = new_payload
self.client.publish(self.topic_prefix+'/objects', new_payload, retain=False)
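One side of this diff reduces the raw detections to a single ON/OFF flag by summing person scores before publishing; a tiny worked example of that rule with made-up values:

```python
detected_objects = [
    {'name': 'person', 'score': 0.62},
    {'name': 'person', 'score': 0.55},
    {'name': 'car', 'score': 0.80},
]
person_score = sum(obj['score'] for obj in detected_objects if obj['name'] == 'person')
# 0.62 + 0.55 is about 1.17, so int(person_score * 100) is well over 100 -> ON
payload = {'person': 'ON' if int(person_score * 100) > 100 else 'OFF'}
print(payload)   # {'person': 'ON'}
```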

View File

@@ -2,15 +2,27 @@ import datetime
import time
import cv2
import threading
import copy
import prctl
import numpy as np
from edgetpu.detection.engine import DetectionEngine
from . util import tonumpyarray
from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT, calculate_region
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/label_map.pbtext'
# Function to read labels from text files.
def ReadLabelFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
class PreppedQueueProcessor(threading.Thread):
def __init__(self, cameras, prepped_frame_queue, fps):
def __init__(self, cameras, prepped_frame_queue):
threading.Thread.__init__(self)
self.cameras = cameras
@@ -18,122 +30,81 @@ class PreppedQueueProcessor(threading.Thread):
# Load the edgetpu engine and labels
self.engine = DetectionEngine(PATH_TO_CKPT)
self.labels = LABELS
self.fps = fps
self.avg_inference_speed = 10
self.labels = ReadLabelFile(PATH_TO_LABELS)
def run(self):
prctl.set_name(self.__class__.__name__)
# process queue...
while True:
frame = self.prepped_frame_queue.get()
# Actual detection.
frame['detected_objects'] = self.engine.detect_with_input_tensor(frame['frame'], threshold=0.2, top_k=5)
self.fps.update()
self.avg_inference_speed = (self.avg_inference_speed*9 + self.engine.get_inference_time())/10
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=0.5, top_k=3)
# parse and pass detected objects back to the camera
parsed_objects = []
for obj in objects:
box = obj.bounding_box.flatten().tolist()
parsed_objects.append({
'frame_time': frame['frame_time'],
'name': str(self.labels[obj.label_id]),
'score': float(obj.score),
'xmin': int((box[0] * frame['region_size']) + frame['region_x_offset']),
'ymin': int((box[1] * frame['region_size']) + frame['region_y_offset']),
'xmax': int((box[2] * frame['region_size']) + frame['region_x_offset']),
'ymax': int((box[3] * frame['region_size']) + frame['region_y_offset'])
})
self.cameras[frame['camera_name']].add_objects(parsed_objects)
self.cameras[frame['camera_name']].detected_objects_queue.put(frame)
class RegionRequester(threading.Thread):
def __init__(self, camera):
# should this be a region class?
class FramePrepper(threading.Thread):
def __init__(self, camera_name, shared_frame, frame_time, frame_ready,
frame_lock,
region_size, region_x_offset, region_y_offset,
prepped_frame_queue):
threading.Thread.__init__(self)
self.camera = camera
self.camera_name = camera_name
self.shared_frame = shared_frame
self.frame_time = frame_time
self.frame_ready = frame_ready
self.frame_lock = frame_lock
self.region_size = region_size
self.region_x_offset = region_x_offset
self.region_y_offset = region_y_offset
self.prepped_frame_queue = prepped_frame_queue
def run(self):
prctl.set_name(self.__class__.__name__)
frame_time = 0.0
while True:
now = datetime.datetime.now().timestamp()
with self.camera.frame_ready:
with self.frame_ready:
# if there isnt a frame ready for processing or it is old, wait for a new frame
if self.camera.frame_time.value == frame_time or (now - self.camera.frame_time.value) > 0.5:
self.camera.frame_ready.wait()
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
self.frame_ready.wait()
# make a copy of the frame_time
frame_time = self.camera.frame_time.value
# grab the current tracked objects
with self.camera.object_tracker.tracked_objects_lock:
tracked_objects = copy.deepcopy(self.camera.object_tracker.tracked_objects).values()
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[frame_time] = len(self.camera.config['regions'])
self.camera.regions_in_process[frame_time] += len(tracked_objects)
for index, region in enumerate(self.camera.config['regions']):
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': index,
'size': region['size'],
'x_offset': region['x_offset'],
'y_offset': region['y_offset']
})
# make a copy of the cropped frame
with self.frame_lock:
cropped_frame = self.shared_frame[self.region_y_offset:self.region_y_offset+self.region_size, self.region_x_offset:self.region_x_offset+self.region_size].copy()
frame_time = self.frame_time.value
# request a region for tracked objects
for tracked_object in tracked_objects:
box = tracked_object['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
class RegionPrepper(threading.Thread):
def __init__(self, camera, frame_cache, resize_request_queue, prepped_frame_queue):
threading.Thread.__init__(self)
self.camera = camera
self.frame_cache = frame_cache
self.resize_request_queue = resize_request_queue
self.prepped_frame_queue = prepped_frame_queue
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
resize_request = self.resize_request_queue.get()
# if the queue is over 100 items long, only prep dynamic regions
if resize_request['region_id'] != -1 and self.prepped_frame_queue.qsize() > 100:
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
frame = self.frame_cache.get(resize_request['frame_time'], None)
if frame is None:
print("RegionPrepper: frame_time not in frame_cache")
with self.camera.regions_in_process_lock:
self.camera.regions_in_process[resize_request['frame_time']] -= 1
if self.camera.regions_in_process[resize_request['frame_time']] == 0:
del self.camera.regions_in_process[resize_request['frame_time']]
self.camera.skipped_region_tracker.update()
continue
# make a copy of the region
cropped_frame = frame[resize_request['y_offset']:resize_request['y_offset']+resize_request['size'], resize_request['x_offset']:resize_request['x_offset']+resize_request['size']].copy()
# convert to RGB
cropped_frame_rgb = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB)
# Resize to 300x300 if needed
if cropped_frame.shape != (300, 300, 3):
# TODO: use Pillow-SIMD?
cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
if cropped_frame_rgb.shape != (300, 300, 3):
cropped_frame_rgb = cv2.resize(cropped_frame_rgb, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
frame_expanded = np.expand_dims(cropped_frame, axis=0)
frame_expanded = np.expand_dims(cropped_frame_rgb, axis=0)
# add the frame to the queue
resize_request['frame'] = frame_expanded.flatten().copy()
self.prepped_frame_queue.put(resize_request)
if not self.prepped_frame_queue.full():
self.prepped_frame_queue.put({
'camera_name': self.camera_name,
'frame_time': frame_time,
'frame': frame_expanded.flatten().copy(),
'region_size': self.region_size,
'region_x_offset': self.region_x_offset,
'region_y_offset': self.region_y_offset
})
else:
print("queue full. moving on")

View File

@@ -2,417 +2,95 @@ import time
import datetime
import threading
import cv2
import prctl
import itertools
import copy
import numpy as np
import multiprocessing as mp
from collections import defaultdict
from scipy.spatial import distance as dist
from frigate.util import draw_box_with_label, LABELS, compute_intersection_rectangle, compute_intersection_over_union, calculate_region
from object_detection.utils import visualization_utils as vis_util
class ObjectCleaner(threading.Thread):
def __init__(self, camera):
def __init__(self, objects_parsed, detected_objects):
threading.Thread.__init__(self)
self.camera = camera
self._objects_parsed = objects_parsed
self._detected_objects = detected_objects
def run(self):
prctl.set_name("ObjectCleaner")
while True:
# wait a bit before checking for expired frames
time.sleep(0.2)
for frame_time in list(self.camera.detected_objects.keys()).copy():
if not frame_time in self.camera.frame_cache:
del self.camera.detected_objects[frame_time]
objects_deregistered = False
with self.camera.object_tracker.tracked_objects_lock:
now = datetime.datetime.now().timestamp()
for id, obj in list(self.camera.object_tracker.tracked_objects.items()):
# if the object is more than 10 seconds old
# and not in the most recent frame, deregister
if (now - obj['frame_time']) > 10 and self.camera.object_tracker.most_recent_frame_time > obj['frame_time']:
self.camera.object_tracker.deregister(id)
objects_deregistered = True
if objects_deregistered:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
# expire the objects that are more than 1 second old
now = datetime.datetime.now().timestamp()
# look for the first object found within the last second
# (newest objects are appended to the end)
detected_objects = self._detected_objects.copy()
class DetectedObjectsProcessor(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame = self.camera.detected_objects_queue.get()
objects = frame['detected_objects']
for raw_obj in objects:
name = str(LABELS[raw_obj.label_id])
if not name in self.camera.objects_to_track:
continue
obj = {
'name': name,
'score': float(raw_obj.score),
'box': {
'xmin': int((raw_obj.bounding_box[0][0] * frame['size']) + frame['x_offset']),
'ymin': int((raw_obj.bounding_box[0][1] * frame['size']) + frame['y_offset']),
'xmax': int((raw_obj.bounding_box[1][0] * frame['size']) + frame['x_offset']),
'ymax': int((raw_obj.bounding_box[1][1] * frame['size']) + frame['y_offset'])
},
'region': {
'xmin': frame['x_offset'],
'ymin': frame['y_offset'],
'xmax': frame['x_offset']+frame['size'],
'ymax': frame['y_offset']+frame['size']
},
'frame_time': frame['frame_time'],
'region_id': frame['region_id']
}
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
obj['clipped'] = False
if ((obj['region']['xmin'] > 5 and obj['box']['xmin']-obj['region']['xmin'] <= 5) or
(obj['region']['ymin'] > 5 and obj['box']['ymin']-obj['region']['ymin'] <= 5) or
(self.camera.frame_shape[1]-obj['region']['xmax'] > 5 and obj['region']['xmax']-obj['box']['xmax'] <= 5) or
(self.camera.frame_shape[0]-obj['region']['ymax'] > 5 and obj['region']['ymax']-obj['box']['ymax'] <= 5)):
obj['clipped'] = True
# Compute the area
# TODO: +1 right?
obj['area'] = (obj['box']['xmax']-obj['box']['xmin'])*(obj['box']['ymax']-obj['box']['ymin'])
self.camera.detected_objects[frame['frame_time']].append(obj)
# TODO: use in_process and processed counts instead to avoid lock
with self.camera.regions_in_process_lock:
if frame['frame_time'] in self.camera.regions_in_process:
self.camera.regions_in_process[frame['frame_time']] -= 1
# print(f"{frame['frame_time']} remaining regions {self.camera.regions_in_process[frame['frame_time']]}")
if self.camera.regions_in_process[frame['frame_time']] == 0:
del self.camera.regions_in_process[frame['frame_time']]
# print(f"{frame['frame_time']} no remaining regions")
self.camera.finished_frame_queue.put(frame['frame_time'])
else:
self.camera.finished_frame_queue.put(frame['frame_time'])
# Thread that checks finished frames for clipped objects and sends back
# for processing if needed
# TODO: evaluate whether or not i really need separate threads/queues for each step
# given that only 1 thread will really be able to run at a time. you need a
# separate process to actually do things in parallel for when you are CPU bound.
# threads are good when you are waiting and could be processing while you wait
class RegionRefiner(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.finished_frame_queue.get()
detected_objects = self.camera.detected_objects[frame_time].copy()
# print(f"{frame_time} finished")
# group by name
detected_object_groups = defaultdict(lambda: [])
num_to_delete = 0
for obj in detected_objects:
detected_object_groups[obj['name']].append(obj)
if now-obj['frame_time']<2:
break
num_to_delete += 1
if num_to_delete > 0:
del self._detected_objects[:num_to_delete]
look_again = False
selected_objects = []
for group in detected_object_groups.values():
# notify that parsed objects were changed
with self._objects_parsed:
self._objects_parsed.notify_all()
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
for o in group]
confidences = [o['score'] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for index in idxs:
obj = group[index[0]]
selected_objects.append(obj)
if obj['clipped']:
box = obj['box']
# calculate a new region that will hopefully get the entire object
(size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
box['xmin'], box['ymin'],
box['xmax'], box['ymax'])
# print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
# Maintains the frame and person with the highest score from the most recent
# motion event
class BestPersonFrame(threading.Thread):
def __init__(self, objects_parsed, recent_frames, detected_objects):
threading.Thread.__init__(self)
self.objects_parsed = objects_parsed
self.recent_frames = recent_frames
self.detected_objects = detected_objects
self.best_person = None
self.best_frame = None
with self.camera.regions_in_process_lock:
if not frame_time in self.camera.regions_in_process:
self.camera.regions_in_process[frame_time] = 1
else:
self.camera.regions_in_process[frame_time] += 1
def run(self):
while True:
# add it to the queue
self.camera.resize_queue.put({
'camera_name': self.camera.name,
'frame_time': frame_time,
'region_id': -1,
'size': size,
'x_offset': x_offset,
'y_offset': y_offset
})
self.camera.dynamic_region_fps.update()
look_again = True
# wait until objects have been parsed
with self.objects_parsed:
self.objects_parsed.wait()
# if we are looking again, then this frame is not ready for processing
if look_again:
# remove the clipped objects
self.camera.detected_objects[frame_time] = [o for o in selected_objects if not o['clipped']]
# make a copy of detected objects
detected_objects = self.detected_objects.copy()
detected_people = [obj for obj in detected_objects if obj['name'] == 'person']
# get the highest scoring person
new_best_person = max(detected_people, key=lambda x:x['score'], default=self.best_person)
# if there isnt a person, continue
if new_best_person is None:
continue
# filter objects based on camera settings
selected_objects = [o for o in selected_objects if not self.filtered(o)]
self.camera.detected_objects[frame_time] = selected_objects
# if there is no current best_person
if self.best_person is None:
self.best_person = new_best_person
# if there is already a best_person
else:
now = datetime.datetime.now().timestamp()
# if the new best person is a higher score than the current best person
# or the current person is more than 1 minute old, use the new best person
if new_best_person['score'] > self.best_person['score'] or (now - self.best_person['frame_time']) > 60:
self.best_person = new_best_person
# print(f"{frame_time} is actually finished")
# keep adding frames to the refined queue as long as they are finished
with self.camera.regions_in_process_lock:
while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
self.camera.last_processed_frame = self.camera.frame_queue.get()
self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
def filtered(self, obj):
object_name = obj['name']
if object_name in self.camera.object_filters:
obj_settings = self.camera.object_filters[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.get('min_area',-1) > obj['area']:
return True
# make a copy of the recent frames
recent_frames = self.recent_frames.copy()
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.get('max_area', self.camera.frame_shape[0]*self.camera.frame_shape[1]) < obj['area']:
return True
if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
best_frame = recent_frames[self.best_person['frame_time']]
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)
# draw the bounding box on the frame
vis_util.draw_bounding_box_on_image_array(best_frame,
self.best_person['ymin'],
self.best_person['xmin'],
self.best_person['ymax'],
self.best_person['xmax'],
color='red',
thickness=2,
display_str_list=["{}: {}%".format(self.best_person['name'],int(self.best_person['score']*100))],
use_normalized_coordinates=False)
# if the score is lower than the threshold, skip
if obj_settings.get('threshold', 0) > obj['score']:
return True
# compute the coordinates of the object and make sure
# the location isnt outside the bounds of the image (can happen from rounding)
y_location = min(int(obj['box']['ymax']), len(self.camera.mask)-1)
x_location = min(int((obj['box']['xmax']-obj['box']['xmin'])/2.0)+obj['box']['xmin'], len(self.camera.mask[0])-1)
# if the object is in a masked location, don't add it to detected objects
if self.camera.mask[y_location][x_location] == [0]:
return True
return False
def has_overlap(self, new_obj, obj, overlap=.7):
# compute intersection rectangle with existing object and new objects region
existing_obj_current_region = compute_intersection_rectangle(obj['box'], new_obj['region'])
# compute intersection rectangle with new object and existing objects region
new_obj_existing_region = compute_intersection_rectangle(new_obj['box'], obj['region'])
# compute iou for the two intersection rectangles that were just computed
iou = compute_intersection_over_union(existing_obj_current_region, new_obj_existing_region)
# if intersection is greater than overlap
if iou > overlap:
return True
else:
return False
def find_group(self, new_obj, groups):
for index, group in enumerate(groups):
for obj in group:
if self.has_overlap(new_obj, obj):
return index
return None
class ObjectTracker(threading.Thread):
def __init__(self, camera, max_disappeared):
threading.Thread.__init__(self)
self.camera = camera
self.tracked_objects = {}
self.tracked_objects_lock = mp.Lock()
self.most_recent_frame_time = None
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
frame_time = self.camera.refined_frame_queue.get()
with self.tracked_objects_lock:
self.match_and_update(self.camera.detected_objects[frame_time])
self.most_recent_frame_time = frame_time
self.camera.frame_output_queue.put((frame_time, copy.deepcopy(self.tracked_objects)))
if len(self.tracked_objects) > 0:
with self.camera.objects_tracked:
self.camera.objects_tracked.notify_all()
def register(self, index, obj):
id = "{}-{}".format(str(obj['frame_time']), index)
obj['id'] = id
obj['top_score'] = obj['score']
self.add_history(obj)
self.tracked_objects[id] = obj
def deregister(self, id):
del self.tracked_objects[id]
def update(self, id, new_obj):
self.tracked_objects[id].update(new_obj)
self.add_history(self.tracked_objects[id])
if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
self.tracked_objects[id]['top_score'] = self.tracked_objects[id]['score']
def add_history(self, obj):
entry = {
'score': obj['score'],
'box': obj['box'],
'region': obj['region'],
'centroid': obj['centroid'],
'frame_time': obj['frame_time']
}
if 'history' in obj:
obj['history'].append(entry)
else:
obj['history'] = [entry]
def match_and_update(self, new_objects):
if len(new_objects) == 0:
return
# group by name
new_object_groups = defaultdict(lambda: [])
for obj in new_objects:
new_object_groups[obj['name']].append(obj)
# track objects for each label type
for label, group in new_object_groups.items():
current_objects = [o for o in self.tracked_objects.values() if o['name'] == label]
current_ids = [o['id'] for o in current_objects]
current_centroids = np.array([o['centroid'] for o in current_objects])
# compute centroids of new objects
for obj in group:
centroid_x = int((obj['box']['xmin']+obj['box']['xmax']) / 2.0)
centroid_y = int((obj['box']['ymin']+obj['box']['ymax']) / 2.0)
obj['centroid'] = (centroid_x, centroid_y)
if len(current_objects) == 0:
for index, obj in enumerate(group):
self.register(index, obj)
return
new_centroids = np.array([o['centroid'] for o in group])
# compute the distance between each pair of tracked
# centroids and new centroids, respectively -- our
# goal will be to match each new centroid to an existing
# object centroid
D = dist.cdist(current_centroids, new_centroids)
# in order to perform this matching we must (1) find the
# smallest value in each row and then (2) sort the row
# indexes based on their minimum values so that the row
# with the smallest value is at the *front* of the index
# list
rows = D.min(axis=1).argsort()
# next, we perform a similar process on the columns by
# finding the smallest value in each column and then
# sorting using the previously computed row index list
cols = D.argmin(axis=1)[rows]
# in order to determine if we need to update, register,
# or deregister an object we need to keep track of which
# of the rows and column indexes we have already examined
usedRows = set()
usedCols = set()
# loop over the combination of the (row, column) index
# tuples
for (row, col) in zip(rows, cols):
# if we have already examined either the row or
# column value before, ignore it
if row in usedRows or col in usedCols:
continue
# otherwise, grab the object ID for the current row,
# set its new centroid, and reset the disappeared
# counter
objectID = current_ids[row]
self.update(objectID, group[col])
# indicate that we have examined each of the row and
# column indexes, respectively
usedRows.add(row)
usedCols.add(col)
# compute the column index we have NOT yet examined
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# if the number of input centroids is greater
# than the number of existing object centroids we need to
# register each new input centroid as a trackable object
# if D.shape[0] < D.shape[1]:
# TODO: rather than assuming these are new objects, we could
# look to see if any of the remaining boxes have a large amount
# of overlap...
for col in unusedCols:
self.register(col, group[col])
# Maintains the frame and object with the highest score
class BestFrames(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
self.best_objects = {}
self.best_frames = {}
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait until objects have been tracked
with self.camera.objects_tracked:
self.camera.objects_tracked.wait()
# make a copy of tracked objects
tracked_objects = list(self.camera.object_tracker.tracked_objects.values())
for obj in tracked_objects:
if obj['name'] in self.best_objects:
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is more than 1 minute old, use the new object
if obj['score'] > self.best_objects[obj['name']]['score'] or (now - self.best_objects[obj['name']]['frame_time']) > 60:
self.best_objects[obj['name']] = copy.deepcopy(obj)
else:
self.best_objects[obj['name']] = copy.deepcopy(obj)
for name, obj in self.best_objects.items():
if obj['frame_time'] in self.camera.frame_cache:
best_frame = self.camera.frame_cache[obj['frame_time']]
draw_box_with_label(best_frame, obj['box']['xmin'], obj['box']['ymin'],
obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']))
# print a timestamp
if self.camera.snapshot_config['show_timestamp']:
time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
self.best_frames[name] = best_frame
# convert back to BGR
self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)

View File

@@ -1,161 +1,5 @@
import datetime
import collections
import numpy as np
import cv2
import threading
import matplotlib.pyplot as plt
# Function to read labels from text files.
def ReadLabelFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def calculate_region(frame_shape, xmin, ymin, xmax, ymax):
# size is larger than longest edge
size = int(max(xmax-xmin, ymax-ymin)*2)
# if the size is too big to fit in the frame
if size > min(frame_shape[0], frame_shape[1]):
size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
# if outside the image
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
x_offset = (frame_shape[1]-size)
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
# if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
y_offset = (frame_shape[0]-size)
return (size, x_offset, y_offset)
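A quick worked example of the region math above, using an illustrative 720x1280 frame and bounding box (the clamping is written compactly here with min/max but gives the same result as the if/elif checks):

```python
frame_shape = (720, 1280, 3)
xmin, ymin, xmax, ymax = 100, 100, 200, 300
size = int(max(xmax - xmin, ymax - ymin) * 2)             # 400, fits in the frame
x_offset = int((xmax - xmin) / 2.0 + xmin - size / 2.0)   # -50 before clamping
y_offset = int((ymax - ymin) / 2.0 + ymin - size / 2.0)   # 0
x_offset = max(0, min(x_offset, frame_shape[1] - size))   # clamp into the image
y_offset = max(0, min(y_offset, frame_shape[0] - size))
print(size, x_offset, y_offset)                           # 400 0 0
```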
def compute_intersection_rectangle(box_a, box_b):
return {
'xmin': max(box_a['xmin'], box_b['xmin']),
'ymin': max(box_a['ymin'], box_b['ymin']),
'xmax': min(box_a['xmax'], box_b['xmax']),
'ymax': min(box_a['ymax'], box_b['ymax'])
}
def compute_intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = compute_intersection_rectangle(box_a, box_b)
# compute the area of intersection rectangle
inter_area = max(0, intersect['xmax'] - intersect['xmin'] + 1) * max(0, intersect['ymax'] - intersect['ymin'] + 1)
if inter_area == 0:
return 0.0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a['xmax'] - box_a['xmin'] + 1) * (box_a['ymax'] - box_a['ymin'] + 1)
box_b_area = (box_b['xmax'] - box_b['xmin'] + 1) * (box_b['ymax'] - box_b['ymin'] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = inter_area / float(box_a_area + box_b_area - inter_area)
# return the intersection over union value
return iou
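And a short worked example of the IoU math above (the +1 terms treat coordinates as inclusive pixel indices; box values are made up):

```python
box_a = {'xmin': 0, 'ymin': 0, 'xmax': 99, 'ymax': 99}     # 100 x 100 = 10000 px
box_b = {'xmin': 50, 'ymin': 0, 'xmax': 149, 'ymax': 99}   # 100 x 100 = 10000 px
inter_area = (99 - 50 + 1) * (99 - 0 + 1)                  # 50 x 100 = 5000 px
iou = inter_area / float(10000 + 10000 - inter_area)       # 5000 / 15000, about 0.33
```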
# convert shared memory array into numpy array
def tonumpyarray(mp_arr):
return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
if color is None:
color = COLOR_MAP[label]
display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min),
(x_max, y_max),
color, thickness)
font_scale = 0.5
font = cv2.FONT_HERSHEY_SIMPLEX
# get the width and height of the text box
size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
text_width = size[0][0]
text_height = size[0][1]
line_height = text_height + size[1]
# set the text start position
if position == 'ul':
text_offset_x = x_min
text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
elif position == 'ur':
text_offset_x = x_max - (text_width+8)
text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
elif position == 'bl':
text_offset_x = x_min
text_offset_y = y_max
elif position == 'br':
text_offset_x = x_max - (text_width+8)
text_offset_y = y_max
# make the coords of the box with a small padding of two pixels
textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/label_map.pbtext'
LABELS = ReadLabelFile(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class QueueMerger():
def __init__(self, from_queues, to_queue):
self.from_queues = from_queues
self.to_queue = to_queue
self.merge_threads = []
def start(self):
for from_q in self.from_queues:
self.merge_threads.append(QueueTransfer(from_q,self.to_queue))
class QueueTransfer(threading.Thread):
def __init__(self, from_queue, to_queue):
threading.Thread.__init__(self)
self.from_queue = from_queue
self.to_queue = to_queue
def run(self):
while True:
self.to_queue.put(self.from_queue.get())
class EventsPerSecond:
def __init__(self, max_events=1000):
self._start = None
self._max_events = max_events
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
self._timestamps.append(datetime.datetime.now().timestamp())
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events+100:
self._timestamps = self._timestamps[(1-self._max_events):]
def eps(self, last_n_seconds=10):
# compute the (approximate) events in the last n seconds
now = datetime.datetime.now().timestamp()
seconds = min(now-self._start, last_n_seconds)
return len([t for t in self._timestamps if t > (now-last_n_seconds)]) / seconds
return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)

View File

@@ -2,355 +2,316 @@ import os
import time
import datetime
import cv2
import queue
import threading
import ctypes
import multiprocessing as mp
import subprocess as sp
import numpy as np
import prctl
import copy
import itertools
import json
from collections import defaultdict
from frigate.util import tonumpyarray, LABELS, draw_box_with_label, calculate_region, EventsPerSecond
from frigate.object_detection import RegionPrepper, RegionRequester
from frigate.objects import ObjectCleaner, BestFrames, DetectedObjectsProcessor, RegionRefiner, ObjectTracker
from frigate.mqtt import MqttObjectPublisher
from object_detection.utils import visualization_utils as vis_util
from . util import tonumpyarray
from . object_detection import FramePrepper
from . objects import ObjectCleaner, BestPersonFrame
from . mqtt import MqttObjectPublisher
# Stores 2 seconds worth of frames so they can be used for other threads
# fetch the frames as fast a possible and store current frame in a shared memory array
def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url, take_frame=1):
# convert shared memory array into numpy and shape into image array
arr = tonumpyarray(shared_arr).reshape(frame_shape)
# start the video capture
video = cv2.VideoCapture()
video.open(rtsp_url)
# keep the buffer small so we minimize old data
video.set(cv2.CAP_PROP_BUFFERSIZE,1)
bad_frame_counter = 0
frame_num = 0
while True:
# check if the video stream is still open, and reopen if needed
if not video.isOpened():
success = video.open(rtsp_url)
if not success:
time.sleep(1)
continue
# grab the frame, but dont decode it yet
ret = video.grab()
# snapshot the time the frame was grabbed
frame_time = datetime.datetime.now()
if ret:
frame_num += 1
if (frame_num % take_frame) != 0:
continue
# go ahead and decode the current frame
ret, frame = video.retrieve()
if ret:
# Lock access and update frame
with frame_lock:
arr[:] = frame
shared_frame_time.value = frame_time.timestamp()
# Notify with the condition that a new frame is ready
with frame_ready:
frame_ready.notify_all()
bad_frame_counter = 0
else:
print("Unable to decode frame")
bad_frame_counter += 1
else:
print("Unable to grab a frame")
bad_frame_counter += 1
if bad_frame_counter > 100:
video.release()
video.release()
# Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
class FrameTracker(threading.Thread):
def __init__(self, frame_time, frame_ready, frame_lock, recent_frames):
def __init__(self, shared_frame, frame_time, frame_ready, frame_lock, recent_frames):
threading.Thread.__init__(self)
self.shared_frame = shared_frame
self.frame_time = frame_time
self.frame_ready = frame_ready
self.frame_lock = frame_lock
self.recent_frames = recent_frames
def run(self):
prctl.set_name(self.__class__.__name__)
frame_time = 0.0
while True:
now = datetime.datetime.now().timestamp()
# wait for a frame
with self.frame_ready:
self.frame_ready.wait()
# if there isnt a frame ready for processing or it is old, wait for a signal
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
self.frame_ready.wait()
# lock and make a copy of the frame
with self.frame_lock:
frame = self.shared_frame.copy()
frame_time = self.frame_time.value
# add the frame to recent frames
self.recent_frames[frame_time] = frame
# delete any old frames
stored_frame_times = list(self.recent_frames.keys())
stored_frame_times.sort(reverse=True)
if len(stored_frame_times) > 100:
frames_to_delete = stored_frame_times[50:]
for k in frames_to_delete:
for k in stored_frame_times:
if (now - k) > 2:
del self.recent_frames[k]
def get_frame_shape(source):
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'panic',
'-show_error',
'-show_streams',
'-of',
'json',
'"'+source+'"'
])
print(ffprobe_cmd)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
info = json.loads(output)
print(info)
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
if video_info['height'] != 0 and video_info['width'] != 0:
return (video_info['height'], video_info['width'], 3)
# fall back to using opencv if ffprobe didn't succeed
video = cv2.VideoCapture(source)
def get_frame_shape(rtsp_url):
# capture a single frame and check the frame shape so the correct array
# size can be allocated in memory
video = cv2.VideoCapture(rtsp_url)
ret, frame = video.read()
frame_shape = frame.shape
video.release()
return frame_shape
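The ffprobe-based version above parses the JSON stream list; a heavily abridged sketch of the output it expects (the field values are illustrative):

# {
#   "streams": [
#     {"codec_type": "video", "width": 1280, "height": 720, ...},
#     {"codec_type": "audio", ...}
#   ]
# }
# -> the first video stream gives frame_shape == (720, 1280, 3)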
def get_ffmpeg_input(ffmpeg_input):
frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
return ffmpeg_input.format(**frigate_vars)
def get_rtsp_url(rtsp_config):
if (rtsp_config['password'].startswith('$')):
rtsp_config['password'] = os.getenv(rtsp_config['password'][1:])
return 'rtsp://{}:{}@{}:{}{}'.format(rtsp_config['user'],
rtsp_config['password'], rtsp_config['host'], rtsp_config['port'],
rtsp_config['path'])
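A hedged sketch of how these two helpers resolve credentials; the environment variable, host, and path below are invented for illustration (os is already imported at the top of this module).

os.environ['FRIGATE_RTSP_PASSWORD'] = 'hunter2'  # hypothetical secret

# new-style config: {FRIGATE_*} placeholders are filled from the environment
get_ffmpeg_input('rtsp://admin:{FRIGATE_RTSP_PASSWORD}@10.0.0.2:554/live')
# -> 'rtsp://admin:hunter2@10.0.0.2:554/live'

# old-style config: a password starting with '$' is read from the named env var
get_rtsp_url({'user': 'admin', 'password': '$FRIGATE_RTSP_PASSWORD',
              'host': '10.0.0.2', 'port': 554, 'path': '/live'})
# -> 'rtsp://admin:hunter2@10.0.0.2:554/live'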
class CameraWatchdog(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def compute_sizes(frame_shape, known_sizes, mask):
# create a 3 dimensional numpy array to store estimated sizes
estimated_sizes = np.zeros((frame_shape[0], frame_shape[1], 2), np.uint32)
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
# wait a bit before checking
time.sleep(10)
sorted_positions = sorted(known_sizes, key=lambda s: s['y'])
if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > self.camera.watchdog_timeout:
print(self.camera.name + ": last frame is more than 5 minutes old, restarting camera capture...")
self.camera.start_or_restart_capture()
time.sleep(5)
last_position = {'y': 0, 'min': 0, 'max': 0}
next_position = sorted_positions.pop(0)
# if the next position has the same y coordinate, skip
while next_position['y'] == last_position['y']:
next_position = sorted_positions.pop(0)
y_change = next_position['y']-last_position['y']
min_size_change = next_position['min']-last_position['min']
max_size_change = next_position['max']-last_position['max']
min_step_size = min_size_change/y_change
max_step_size = max_size_change/y_change
# Thread to read the stdout of the ffmpeg process and update the current frame
class CameraCapture(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
min_current_size = 0
max_current_size = 0
def run(self):
prctl.set_name(self.__class__.__name__)
frame_num = 0
while True:
if self.camera.ffmpeg_process.poll() is not None:
print(self.camera.name + ": ffmpeg process is not running. exiting capture thread...")
break
for y_position in range(frame_shape[0]):
# fill the row with the estimated size
estimated_sizes[y_position,:] = [min_current_size, max_current_size]
raw_image = self.camera.ffmpeg_process.stdout.read(self.camera.frame_size)
# if you have reached the next size
if y_position == next_position['y']:
last_position = next_position
# if there are still positions left
if len(sorted_positions) > 0:
next_position = sorted_positions.pop(0)
# if the next position has the same y coordinate, skip
while next_position['y'] == last_position['y']:
next_position = sorted_positions.pop(0)
y_change = next_position['y']-last_position['y']
min_size_change = next_position['min']-last_position['min']
max_size_change = next_position['max']-last_position['max']
min_step_size = min_size_change/y_change
max_step_size = max_size_change/y_change
else:
min_step_size = 0
max_step_size = 0
min_current_size += min_step_size
max_current_size += max_step_size
if len(raw_image) == 0:
print(self.camera.name + ": ffmpeg didnt return a frame. something is wrong. exiting capture thread...")
break
# apply mask by filling 0s for all locations a person could not be standing
if mask is not None:
pass
frame_num += 1
if (frame_num % self.camera.take_frame) != 0:
continue
with self.camera.frame_lock:
# TODO: use frame_queue instead
self.camera.frame_time.value = datetime.datetime.now().timestamp()
self.camera.frame_cache[self.camera.frame_time.value] = (
np
.frombuffer(raw_image, np.uint8)
.reshape(self.camera.frame_shape)
)
self.camera.frame_queue.put(self.camera.frame_time.value)
# Notify with the condition that a new frame is ready
with self.camera.frame_ready:
self.camera.frame_ready.notify_all()
self.camera.fps.update()
class VideoWriter(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
prctl.set_name(self.__class__.__name__)
while True:
(frame_time, tracked_objects) = self.camera.frame_output_queue.get()
# if len(tracked_objects) == 0:
# continue
# f = open(f"/debug/output/{self.camera.name}-{str(format(frame_time, '.8f'))}.jpg", 'wb')
# f.write(self.camera.frame_with_objects(frame_time, tracked_objects))
# f.close()
return estimated_sizes
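Pieced together from the fragments above (the diff interleaves them with the watchdog and capture threads), compute_sizes linearly interpolates the configured known_sizes across frame rows. A small sketch with invented values:

# Illustrative only -- hypothetical known_sizes for a 480x640 frame.
known_sizes = [{'y': 100, 'min': 1000, 'max': 2000},
               {'y': 300, 'min': 5000, 'max': 9000}]
sizes = compute_sizes((480, 640), known_sizes, None)
# sizes ramp from zero at the top of the frame to the first entry, are
# interpolated linearly between entries, and hold the last entry's values below it
sizes[200][0]  # -> [3000, 5500], halfway between the two entries
sizes[400][0]  # -> [5000, 9000], held at the last known size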
class Camera:
def __init__(self, name, ffmpeg_config, global_objects_config, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
def __init__(self, name, config, prepped_frame_queue, mqtt_client, mqtt_prefix, debug=False):
self.name = name
self.config = config
self.detected_objects = defaultdict(lambda: [])
self.frame_cache = {}
self.last_processed_frame = None
# queue for re-assembling frames in order
self.frame_queue = queue.Queue()
# track how many regions have been requested for a frame so we know when a frame is complete
self.regions_in_process = {}
# Lock to control access
self.regions_in_process_lock = mp.Lock()
self.finished_frame_queue = queue.Queue()
self.refined_frame_queue = queue.Queue()
self.frame_output_queue = queue.Queue()
self.ffmpeg = config.get('ffmpeg', {})
self.ffmpeg_input = get_ffmpeg_input(self.ffmpeg['input'])
self.ffmpeg_global_args = self.ffmpeg.get('global_args', ffmpeg_config['global_args'])
self.ffmpeg_hwaccel_args = self.ffmpeg.get('hwaccel_args', ffmpeg_config['hwaccel_args'])
self.ffmpeg_input_args = self.ffmpeg.get('input_args', ffmpeg_config['input_args'])
self.ffmpeg_output_args = self.ffmpeg.get('output_args', ffmpeg_config['output_args'])
camera_objects_config = config.get('objects', {})
self.detected_objects = []
self.recent_frames = {}
self.rtsp_url = get_rtsp_url(self.config['rtsp'])
self.take_frame = self.config.get('take_frame', 1)
self.watchdog_timeout = self.config.get('watchdog_timeout', 300)
self.snapshot_config = {
'show_timestamp': self.config.get('snapshots', {}).get('show_timestamp', True)
}
self.regions = self.config['regions']
if 'width' in self.config and 'height' in self.config:
self.frame_shape = (self.config['height'], self.config['width'], 3)
else:
self.frame_shape = get_frame_shape(self.ffmpeg_input)
self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
self.frame_shape = get_frame_shape(self.rtsp_url)
self.mqtt_client = mqtt_client
self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
self.debug = debug
# compute the flattened array length from the shape of the frame
flat_array_length = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
# create shared array for storing the full frame image data
self.shared_frame_array = mp.Array(ctypes.c_uint8, flat_array_length)
# create shared value for storing the frame_time
self.frame_time = mp.Value('d', 0.0)
self.shared_frame_time = mp.Value('d', 0.0)
# Lock to control access to the frame
self.frame_lock = mp.Lock()
# Condition for notifying that a new frame is ready
self.frame_ready = mp.Condition()
# Condition for notifying that objects were tracked
self.objects_tracked = mp.Condition()
# Condition for notifying that objects were parsed
self.objects_parsed = mp.Condition()
# Queue for prepped frames, max size set to (number of regions * 5)
self.resize_queue = queue.Queue()
# shape current frame so it can be treated as a numpy image
self.shared_frame_np = tonumpyarray(self.shared_frame_array).reshape(self.frame_shape)
# Queue for raw detected objects
self.detected_objects_queue = queue.Queue()
self.detected_objects_processor = DetectedObjectsProcessor(self)
self.detected_objects_processor.start()
# create the process to capture frames from the RTSP stream and store in a shared array
self.capture_process = mp.Process(target=fetch_frames, args=(self.shared_frame_array,
self.shared_frame_time, self.frame_lock, self.frame_ready, self.frame_shape,
self.rtsp_url, self.take_frame))
self.capture_process.daemon = True
# initialize the frame cache
self.cached_frame_with_objects = {
'frame_bytes': [],
'frame_time': 0
}
self.ffmpeg_process = None
self.capture_thread = None
self.fps = EventsPerSecond()
self.skipped_region_tracker = EventsPerSecond()
# combine tracked objects lists
self.objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
# merge object filters
global_object_filters = global_objects_config.get('filters', {})
camera_object_filters = camera_objects_config.get('filters', {})
objects_with_config = set().union(global_object_filters.keys(), camera_object_filters.keys())
self.object_filters = {}
for obj in objects_with_config:
self.object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
# start a thread to track objects
self.object_tracker = ObjectTracker(self, 10)
self.object_tracker.start()
# start a thread to write tracked frames to disk
self.video_writer = VideoWriter(self)
self.video_writer.start()
# start a thread to queue resize requests for regions
self.region_requester = RegionRequester(self)
self.region_requester.start()
# start a thread to cache recent frames for processing
self.frame_tracker = FrameTracker(self.frame_time,
self.frame_ready, self.frame_lock, self.frame_cache)
# for each region, create a separate thread to resize the region and prep for detection
self.detection_prep_threads = []
for region in self.config['regions']:
self.detection_prep_threads.append(FramePrepper(
self.name,
self.shared_frame_np,
self.shared_frame_time,
self.frame_ready,
self.frame_lock,
region['size'], region['x_offset'], region['y_offset'],
prepped_frame_queue
))
# start a thread to store recent motion frames for processing
self.frame_tracker = FrameTracker(self.shared_frame_np, self.shared_frame_time,
self.frame_ready, self.frame_lock, self.recent_frames)
self.frame_tracker.start()
# start a thread to resize regions
self.region_prepper = RegionPrepper(self, self.frame_cache, self.resize_queue, prepped_frame_queue)
self.region_prepper.start()
# start a thread to store the highest scoring recent frames for monitored object types
self.best_frames = BestFrames(self)
self.best_frames.start()
# start a thread to store the highest scoring recent person frame
self.best_person_frame = BestPersonFrame(self.objects_parsed, self.recent_frames, self.detected_objects)
self.best_person_frame.start()
# start a thread to expire objects from the detected objects list
self.object_cleaner = ObjectCleaner(self)
self.object_cleaner = ObjectCleaner(self.objects_parsed, self.detected_objects)
self.object_cleaner.start()
# start a thread to refine regions when objects are clipped
self.dynamic_region_fps = EventsPerSecond()
self.region_refiner = RegionRefiner(self)
self.region_refiner.start()
self.dynamic_region_fps.start()
# start a thread to publish object scores
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self)
# start a thread to publish object scores (currently only person)
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self.objects_parsed, self.detected_objects)
mqtt_publisher.start()
# create a watchdog thread for capture process
self.watchdog = CameraWatchdog(self)
# load in the mask for object detection
# load in the mask for person detection
if 'mask' in self.config:
self.mask = cv2.imread("/config/{}".format(self.config['mask']), cv2.IMREAD_GRAYSCALE)
else:
self.mask = None
if self.mask is None:
self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
self.mask[:] = 255
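A brief sketch of what a mask is expected to look like; the dimensions and masked strip below are illustrative. White (255) pixels allow detections, black (0) pixels mark locations where a detection's bottom-center should be ignored (see add_objects further down).

mask = np.zeros((480, 640), np.uint8)  # hypothetical frame size
mask[:] = 255                          # allow detections everywhere...
mask[:, :100] = 0                      # ...except a 100px strip on the left edge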
def start_or_restart_capture(self):
if self.ffmpeg_process is not None:
print("Terminating the existing ffmpeg process...")
self.ffmpeg_process.terminate()
try:
print("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_process.wait(timeout=30)
except sp.TimeoutExpired:
print("FFmpeg didnt exit. Force killing...")
self.ffmpeg_process.kill()
self.ffmpeg_process.wait()
print("Waiting for the capture thread to exit...")
self.capture_thread.join()
self.ffmpeg_process = None
self.capture_thread = None
# create the process to capture frames from the input stream and store in a shared array
print("Creating a new ffmpeg process...")
self.start_ffmpeg()
print("Creating a new capture thread...")
self.capture_thread = CameraCapture(self)
print("Starting a new capture thread...")
self.capture_thread.start()
self.fps.start()
self.skipped_region_tracker.start()
def start_ffmpeg(self):
ffmpeg_cmd = (['ffmpeg'] +
self.ffmpeg_global_args +
self.ffmpeg_hwaccel_args +
self.ffmpeg_input_args +
['-i', self.ffmpeg_input] +
self.ffmpeg_output_args +
['pipe:'])
print(" ".join(ffmpeg_cmd))
self.ffmpeg_process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=self.frame_size)
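For reference, a hedged example of the kind of command the list above can assemble; every argument value here is hypothetical and comes from the global or per-camera ffmpeg config, not from this diff. The output args are assumed to emit raw frames so the capture thread can read frame_size bytes per frame from the pipe.

# ffmpeg -hide_banner -loglevel panic \
#        -rtsp_transport tcp \
#        -i rtsp://admin:hunter2@10.0.0.2:554/live \
#        -f rawvideo -pix_fmt rgb24 pipe: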
# pre-compute estimated person size for every pixel in the image
if 'known_sizes' in self.config:
self.calculated_person_sizes = compute_sizes((self.frame_shape[0], self.frame_shape[1]),
self.config['known_sizes'], None)
else:
self.calculated_person_sizes = None
def start(self):
self.start_or_restart_capture()
self.watchdog.start()
self.capture_process.start()
# start the object detection prep threads
for detection_prep_thread in self.detection_prep_threads:
detection_prep_thread.start()
def join(self):
self.capture_thread.join()
self.capture_process.join()
def get_capture_pid(self):
return self.ffmpeg_process.pid
return self.capture_process.pid
def get_best(self, label):
return self.best_frames.best_frames.get(label)
def add_objects(self, objects):
if len(objects) == 0:
return
def stats(self):
return {
'camera_fps': self.fps.eps(60),
'resize_queue': self.resize_queue.qsize(),
'frame_queue': self.frame_queue.qsize(),
'finished_frame_queue': self.finished_frame_queue.qsize(),
'refined_frame_queue': self.refined_frame_queue.qsize(),
'regions_in_process': self.regions_in_process,
'dynamic_regions_per_sec': self.dynamic_region_fps.eps(),
'skipped_regions_per_sec': self.skipped_region_tracker.eps(60)
}
def frame_with_objects(self, frame_time, tracked_objects=None):
if frame_time not in self.frame_cache:
frame = np.zeros(self.frame_shape, np.uint8)
else:
frame = self.frame_cache[frame_time].copy()
for obj in objects:
if self.debug:
# print out the detected objects, scores and locations
print(self.name, obj['name'], obj['score'], obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'])
detected_objects = self.detected_objects[frame_time].copy()
# bottom-center of the bounding box, as (y, x)
location = (int(obj['ymax']), int(obj['xmin']+(obj['xmax']-obj['xmin'])/2))
# if the object is in a masked location, continue
if self.mask[location[0]][location[1]] == 0:
continue
if self.calculated_person_sizes is not None and obj['name'] == 'person':
person_size_range = self.calculated_person_sizes[location[0]][location[1]]
# if the person isn't on the ground, continue
if(person_size_range[0] == 0 and person_size_range[1] == 0):
continue
person_size = (obj['xmax']-obj['xmin'])*(obj['ymax']-obj['ymin'])
# if the person's size is outside the estimated range for that location, continue
if person_size < person_size_range[0] or person_size > person_size_range[1]:
continue
self.detected_objects.append(obj)
with self.objects_parsed:
self.objects_parsed.notify_all()
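Tying the size filter back to the earlier compute_sizes sketch, a worked example with the same invented known_sizes:

# A 'person' box with xmin=300, xmax=360, ymin=80, ymax=200 has an area of
# 60 * 120 = 7200 px, but the estimated range at its bottom edge (y=200) is
# roughly [3000, 5500], so add_objects() would drop it as implausibly large.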
def get_best_person(self):
return self.best_person_frame.best_frame
def get_current_frame_with_objects(self):
# make a copy of the current detected objects
detected_objects = self.detected_objects.copy()
# lock and make a copy of the current frame
with self.frame_lock:
frame = self.shared_frame_np.copy()
# convert to RGB for drawing
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# draw the bounding boxes on the screen
for obj in detected_objects:
vis_util.draw_bounding_box_on_image_array(frame,
obj['ymin'],
obj['xmin'],
obj['ymax'],
obj['xmax'],
color='red',
thickness=2,
display_str_list=["{}: {}%".format(obj['name'],int(obj['score']*100))],
use_normalized_coordinates=False)
for region in self.regions:
color = (255,255,255)
@@ -358,48 +319,11 @@ class Camera:
(region['x_offset']+region['size'], region['y_offset']+region['size']),
color, 2)
# draw the bounding boxes on the screen
if tracked_objects is None:
with self.object_tracker.tracked_objects_lock:
tracked_objects = copy.deepcopy(self.object_tracker.tracked_objects)
for obj in detected_objects:
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']), thickness=3)
for id, obj in tracked_objects.items():
color = (0, 255,0) if obj['frame_time'] == frame_time else (255, 0, 0)
draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], id, color=color, thickness=1, position='bl')
# print a timestamp
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
# print fps
cv2.putText(frame, str(self.fps.eps())+'FPS', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
# convert to BGR
# convert back to BGR
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# encode the image into a jpg
ret, jpg = cv2.imencode('.jpg', frame)
return jpg.tobytes()
def get_current_frame_with_objects(self):
frame_time = self.last_processed_frame
if frame_time == self.cached_frame_with_objects['frame_time']:
return self.cached_frame_with_objects['frame_bytes']
frame_bytes = self.frame_with_objects(frame_time)
self.cached_frame_with_objects = {
'frame_bytes': frame_bytes,
'frame_time': frame_time
}
return frame_bytes
return frame