Compare commits

...

11 Commits

Author SHA1 Message Date
blakeblackshear
228d5ed9c1 working odroid build, still needs hwaccel 2019-05-27 10:17:57 -05:00
Blake Blackshear
e791d6646b Merge pull request #34 from blakeblackshear/watchdog
0.1.2
2019-05-11 07:43:09 -05:00
blakeblackshear
3019b0218c make the threshold configurable per region. fixes #31 2019-05-11 07:39:27 -05:00
blakeblackshear
6900e140d5 add a watchdog to the capture process to detect silent failures. fixes #27 2019-05-11 07:16:15 -05:00
Blake Blackshear
911c1b2bfa Merge pull request #32 from tubalainen/patch-2
Clarification on username and password for MQTT
2019-05-11 07:14:19 -05:00
Blake Blackshear
f4587462cf Merge pull request #33 from tubalainen/patch-3
Update of the home assistant integration example
2019-05-11 07:14:01 -05:00
tubalainen
cac1faa8ac Update of the home assistant integration example
sensor to binary_sensor
device_class type "moving" does not exist, update to "motion"
2019-05-10 16:47:40 +02:00
tubalainen
9525bae5a3 Clarification on username and password for MQTT 2019-05-10 16:36:22 +02:00
blakeblackshear
dbcfd109f6 fix missing import 2019-05-10 06:19:39 -05:00
Blake Blackshear
f95d8b6210 Merge pull request #26 from blakeblackshear/mask
add the ability to mask the standing location of a person
2019-05-01 06:43:32 -05:00
blakeblackshear
4dacf02ef9 add the ability to mask the standing location of a person 2019-04-30 20:35:22 -05:00
7 changed files with 139 additions and 117 deletions

Dockerfile

@@ -1,70 +1,59 @@
FROM ubuntu:16.04
FROM ubuntu:18.04
# Install system packages
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y python3 \
python3-dev \
python-pil \
python-lxml \
python-tk \
# Install packages for apt repo
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
apt-transport-https \
ca-certificates \
curl \
wget \
gnupg-agent \
dirmngr \
software-properties-common
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D986B59D
RUN echo "deb http://deb.odroid.in/5422-s bionic main" > /etc/apt/sources.list.d/odroid.list
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
python3 \
# OpenCV dependencies
ffmpeg \
build-essential \
cmake \
git \
libgtk2.0-dev \
pkg-config \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libtbb2 \
libtbb-dev \
cmake \
unzip \
pkg-config \
libjpeg-dev \
libpng-dev \
libtiff-dev \
libjasper-dev \
libdc1394-22-dev \
x11-apps \
wget \
vim \
ffmpeg \
unzip \
libusb-1.0-0-dev \
python3-setuptools \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libv4l-dev \
libxvidcore-dev \
libx264-dev \
libgtk-3-dev \
libatlas-base-dev \
gfortran \
python3-dev \
# Coral USB Python API Dependencies
libusb-1.0-0 \
python3-pip \
python3-pil \
python3-numpy \
zlib1g-dev \
libgoogle-glog-dev \
swig \
libunwind-dev \
libc++-dev \
libc++abi-dev \
build-essential \
libc++1 \
libc++abi1 \
libunwind8 \
libgcc1 \
&& rm -rf /var/lib/apt/lists/*
# Install core packages
RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
RUN pip install -U pip \
numpy \
pillow \
matplotlib \
notebook \
Flask \
imutils \
paho-mqtt \
PyYAML
# Install tensorflow models object detection
RUN GIT_SSL_NO_VERIFY=true git clone -q https://github.com/tensorflow/models /usr/local/lib/python3.5/dist-packages/tensorflow/models
RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/google/protobuf/releases/download/v3.5.1/protobuf-python-3.5.1.tar.gz
# Download & build protobuf-python
RUN cd /usr/local/src/ \
&& tar xf protobuf-python-3.5.1.tar.gz \
&& rm protobuf-python-3.5.1.tar.gz \
&& cd /usr/local/src/protobuf-3.5.1/ \
&& ./configure \
&& make \
&& make install \
&& ldconfig \
&& rm -rf /usr/local/src/protobuf-3.5.1/
# Download & build OpenCV
RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
RUN cd /usr/local/src/ \
@@ -76,30 +65,31 @@ RUN cd /usr/local/src/ \
&& cmake -D CMAKE_INSTALL_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local/ .. \
&& make -j4 \
&& make install \
&& ldconfig \
&& rm -rf /usr/local/src/opencv-4.0.1
# Download and install EdgeTPU libraries
RUN wget -q -O edgetpu_api.tar.gz --no-check-certificate http://storage.googleapis.com/cloud-iot-edge-pretrained-models/edgetpu_api.tar.gz
# Download and install EdgeTPU libraries for Coral
RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names
RUN tar xzf edgetpu_api.tar.gz \
&& cd python-tflite-source \
&& cp -p libedgetpu/libedgetpu_x86_64.so /lib/x86_64-linux-gnu/libedgetpu.so \
&& cp edgetpu/swig/compiled_so/_edgetpu_cpp_wrapper_x86_64.so edgetpu/swig/_edgetpu_cpp_wrapper.so \
&& cp edgetpu/swig/compiled_so/edgetpu_cpp_wrapper.py edgetpu/swig/ \
&& python3 setup.py develop --user
&& cd edgetpu_api \
&& cp -p libedgetpu/libedgetpu_arm32.so /usr/lib/arm-linux-gnueabihf/libedgetpu.so.1.0 \
&& ldconfig \
&& python3 -m pip install --no-deps "$(ls edgetpu-*-py3-none-any.whl 2>/dev/null)"
RUN cd /usr/local/lib/python3.6/dist-packages/edgetpu/swig/ \
&& ln -s _edgetpu_cpp_wrapper.cpython-35m-arm-linux-gnueabihf.so _edgetpu_cpp_wrapper.cpython-36m-arm-linux-gnueabihf.so
# symlink the model and labels
RUN wget https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
RUN wget https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
RUN ln -s mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
RUN ln -s /coco_labels.txt /label_map.pbtext
# Minimize image size
RUN (apt-get autoremove -y; \
apt-get autoclean -y)
# symlink the model and labels
RUN ln -s /python-tflite-source/edgetpu/test_data/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
RUN ln -s /python-tflite-source/edgetpu/test_data/coco_labels.txt /label_map.pbtext
# Set TF object detection available
ENV PYTHONPATH "$PYTHONPATH:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research:/usr/local/lib/python3.5/dist-packages/tensorflow/models/research/slim"
RUN cd /usr/local/lib/python3.5/dist-packages/tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=.
WORKDIR /opt/frigate/
ADD frigate frigate/
COPY detect_objects.py .
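
The rebuilt image installs the Coral EdgeTPU Python API for the ODROID's arm32 userland rather than x86_64. A minimal smoke test for the resulting image, assuming a Coral USB accelerator is attached and the symlinks created above exist (a sketch, not part of this diff):

```python
# Smoke test for the image built above, not part of this diff.
# Assumes a Coral USB accelerator is plugged in and that the
# /frozen_inference_graph.pb symlink from the Dockerfile points at the
# EdgeTPU-compiled tflite model.
from edgetpu.detection.engine import DetectionEngine

engine = DetectionEngine('/frozen_inference_graph.pb')
print('model loaded; expected input tensor size:',
      engine.required_input_array_size())
```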

README.md

@@ -62,12 +62,12 @@ camera:
platform: generic
still_image_url: http://<ip>:5000/<camera_name>/best_person.jpg
sensor:
binary_sensor:
- name: Camera Person
platform: mqtt
state_topic: "frigate/<camera_name>/objects"
value_template: '{{ value_json.person }}'
device_class: moving
device_class: motion
availability_topic: "frigate/available"
```
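
The binary_sensor above only works if the payload on frigate/<camera_name>/objects renders to ON/OFF through the value_template. As an illustration only, a publisher producing that JSON shape (the real keys and values come from frigate's MqttObjectPublisher; the payload here is hypothetical):

```python
# Illustrative publisher, not frigate's code: shows the JSON shape that
# '{{ value_json.person }}' in the binary_sensor above would parse.
import json
import paho.mqtt.client as mqtt  # paho-mqtt is installed by the Dockerfile

client = mqtt.Client()
client.connect("mqtt.server.com")  # host from config.example.yml
# hypothetical payload; the actual keys/values come from MqttObjectPublisher
client.publish("frigate/back/objects", json.dumps({"person": "ON"}))
client.publish("frigate/available", "online")  # matches availability_topic
```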

BIN config/back-mask.bmp (new file, 1.8 MiB; binary content not shown)

config/config.example.yml

@@ -3,6 +3,8 @@ web_port: 5000
mqtt:
host: mqtt.server.com
topic_prefix: frigate
# user: username # Optional -- Uncomment for use
# password: password # Optional -- Uncomment for use
cameras:
back:
@@ -13,37 +15,20 @@ cameras:
# values that begin with a "$" will be replaced with environment variable
password: $RTSP_PASSWORD
path: /cam/realmonitor?channel=1&subtype=2
mask: back-mask.bmp
regions:
- size: 350
x_offset: 0
y_offset: 300
min_person_area: 5000
threshold: 0.5
- size: 400
x_offset: 350
y_offset: 250
min_person_area: 2000
threshold: 0.5
- size: 400
x_offset: 750
y_offset: 250
min_person_area: 2000
back2:
rtsp:
user: viewer
host: 10.0.10.10
port: 554
# values that begin with a "$" will be replaced with environment variable
password: $RTSP_PASSWORD
path: /cam/realmonitor?channel=1&subtype=2
regions:
- size: 350
x_offset: 0
y_offset: 300
min_person_area: 5000
- size: 400
x_offset: 350
y_offset: 250
min_person_area: 2000
- size: 400
x_offset: 750
y_offset: 250
min_person_area: 2000
threshold: 0.5
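
The new mask: back-mask.bmp entry names a grayscale image sized to the frame; video.py below loads it with cv2.IMREAD_GRAYSCALE and drops any person standing on a black (0) pixel. A sketch for producing such a mask, with an assumed resolution and region:

```python
# Sketch: generate a mask like config/back-mask.bmp. White (255) keeps
# detections, black (0) suppresses them. Resolution and rectangle below
# are assumptions, not values from this diff.
import cv2
import numpy as np

frame_h, frame_w = 720, 1280                       # assumed camera resolution
mask = np.full((frame_h, frame_w), 255, np.uint8)  # keep everything by default
cv2.rectangle(mask, (0, 550), (400, 719), 0, -1)   # black out an area to ignore
cv2.imwrite('config/back-mask.bmp', mask)
```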

frigate/object_detection.py

@@ -38,7 +38,7 @@ class PreppedQueueProcessor(threading.Thread):
frame = self.prepped_frame_queue.get()
# Actual detection.
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=0.5, top_k=3)
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=frame['region_threshold'], top_k=3)
# parse and pass detected objects back to the camera
parsed_objects = []
for obj in objects:
@@ -59,7 +59,7 @@ class PreppedQueueProcessor(threading.Thread):
class FramePrepper(threading.Thread):
def __init__(self, camera_name, shared_frame, frame_time, frame_ready,
frame_lock,
region_size, region_x_offset, region_y_offset,
region_size, region_x_offset, region_y_offset, region_threshold,
prepped_frame_queue):
threading.Thread.__init__(self)
@@ -71,6 +71,7 @@ class FramePrepper(threading.Thread):
self.region_size = region_size
self.region_x_offset = region_x_offset
self.region_y_offset = region_y_offset
self.region_threshold = region_threshold
self.prepped_frame_queue = prepped_frame_queue
def run(self):
@@ -103,6 +104,7 @@ class FramePrepper(threading.Thread):
'frame_time': frame_time,
'frame': frame_expanded.flatten().copy(),
'region_size': self.region_size,
'region_threshold': self.region_threshold,
'region_x_offset': self.region_x_offset,
'region_y_offset': self.region_y_offset
})
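
With region_threshold riding along in the queue message, a single PreppedQueueProcessor can serve regions with different sensitivities. Reconstructed from the hunks above, a message now carries at least these fields (values are placeholders; other fields may exist outside the visible hunks):

```python
# Shape of a prepped-frame message after this change, reconstructed from
# the hunks above; all values are placeholders.
import numpy as np

region = np.zeros((300, 300, 3), np.uint8)  # stand-in for a cropped region
prepped_frame = {
    'frame_time': 1557575967.0,
    'frame': np.expand_dims(region, axis=0).flatten().copy(),
    'region_size': 350,
    'region_threshold': 0.5,  # new: per-region, replacing the hardcoded 0.5
    'region_x_offset': 0,
    'region_y_offset': 300,
}
```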

frigate/objects.py

@@ -2,7 +2,6 @@ import time
import datetime
import threading
import cv2
from object_detection.utils import visualization_utils as vis_util
class ObjectCleaner(threading.Thread):
def __init__(self, objects_parsed, detected_objects):
@@ -82,15 +81,10 @@ class BestPersonFrame(threading.Thread):
best_frame = recent_frames[self.best_person['frame_time']]
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)
# draw the bounding box on the frame
vis_util.draw_bounding_box_on_image_array(best_frame,
self.best_person['ymin'],
self.best_person['xmin'],
self.best_person['ymax'],
self.best_person['xmax'],
color='red',
thickness=2,
display_str_list=["{}: {}%".format(self.best_person['name'],int(self.best_person['score']*100))],
use_normalized_coordinates=False)
color = (255,0,0)
cv2.rectangle(best_frame, (self.best_person['xmin'], self.best_person['ymin']),
(self.best_person['xmax'], self.best_person['ymax']),
color, 2)
# convert back to BGR
self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
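
Replacing vis_util.draw_bounding_box_on_image_array with cv2.rectangle drops the TensorFlow object-detection dependency, but it also drops the "name: score%" label the old call drew. If the label is wanted back, a plain-OpenCV sketch (a suggestion, not part of this diff):

```python
# Hedged sketch, not in this diff: restore the "name: score%" label that
# vis_util used to draw, using only OpenCV.
import cv2

def draw_box_with_label(frame, obj, color=(255, 0, 0)):
    cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
                  (obj['xmax'], obj['ymax']), color, 2)
    label = "{}: {}%".format(obj['name'], int(obj['score'] * 100))
    cv2.putText(frame, label, (obj['xmin'], max(obj['ymin'] - 5, 10)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
```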

frigate/video.py

@@ -5,7 +5,7 @@ import cv2
import threading
import ctypes
import multiprocessing as mp
from object_detection.utils import visualization_utils as vis_util
import numpy as np
from . util import tonumpyarray
from . object_detection import FramePrepper
from . objects import ObjectCleaner, BestPersonFrame
@@ -19,6 +19,7 @@ def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_s
# start the video capture
video = cv2.VideoCapture()
video.open(rtsp_url)
print("Opening the RTSP Url...")
# keep the buffer small so we minimize old data
video.set(cv2.CAP_PROP_BUFFERSIZE,1)
@@ -108,6 +109,22 @@ def get_rtsp_url(rtsp_config):
rtsp_config['password'], rtsp_config['host'], rtsp_config['port'],
rtsp_config['path'])
class CameraWatchdog(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
def run(self):
while True:
# wait a bit before checking
time.sleep(60)
if (datetime.datetime.now().timestamp() - self.camera.shared_frame_time.value) > 2:
print("last frame is more than 2 seconds old, restarting camera capture...")
self.camera.start_or_restart_capture()
time.sleep(5)
class Camera:
def __init__(self, name, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
self.name = name
@@ -136,21 +153,24 @@ class Camera:
# shape current frame so it can be treated as a numpy image
self.shared_frame_np = tonumpyarray(self.shared_frame_array).reshape(self.frame_shape)
# create the process to capture frames from the RTSP stream and store in a shared array
self.capture_process = mp.Process(target=fetch_frames, args=(self.shared_frame_array,
self.shared_frame_time, self.frame_lock, self.frame_ready, self.frame_shape, self.rtsp_url))
self.capture_process.daemon = True
self.capture_process = None
# for each region, create a separate thread to resize the region and prep for detection
self.detection_prep_threads = []
for region in self.config['regions']:
# set a default threshold of 0.5 if not defined
if not 'threshold' in region:
region['threshold'] = 0.5
if not isinstance(region['threshold'], float):
print('Threshold is not a float. Setting to 0.5 default.')
region['threshold'] = 0.5
self.detection_prep_threads.append(FramePrepper(
self.name,
self.shared_frame_np,
self.shared_frame_time,
self.frame_ready,
self.frame_lock,
region['size'], region['x_offset'], region['y_offset'],
region['size'], region['x_offset'], region['y_offset'], region['threshold'],
prepped_frame_queue
))
@@ -170,12 +190,39 @@ class Camera:
# start a thread to publish object scores (currently only person)
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self.objects_parsed, self.detected_objects)
mqtt_publisher.start()
# create a watchdog thread for capture process
self.watchdog = CameraWatchdog(self)
# load in the mask for person detection
if 'mask' in self.config:
self.mask = cv2.imread("/config/{}".format(self.config['mask']), cv2.IMREAD_GRAYSCALE)
else:
self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
self.mask[:] = 255
def start_or_restart_capture(self):
if not self.capture_process is None:
print("Terminating the existing capture process...")
self.capture_process.terminate()
del self.capture_process
self.capture_process = None
# create the process to capture frames from the RTSP stream and store in a shared array
print("Creating a new capture process...")
self.capture_process = mp.Process(target=fetch_frames, args=(self.shared_frame_array,
self.shared_frame_time, self.frame_lock, self.frame_ready, self.frame_shape, self.rtsp_url))
self.capture_process.daemon = True
print("Starting a new capture process...")
self.capture_process.start()
def start(self):
self.capture_process.start()
self.start_or_restart_capture()
# start the object detection prep threads
for detection_prep_thread in self.detection_prep_threads:
detection_prep_thread.start()
self.watchdog.start()
def join(self):
self.capture_process.join()
@@ -206,6 +253,15 @@ class Camera:
# detected person, don't add it to detected objects
if region and region['min_person_area'] > person_area:
continue
# compute the coordinates of the person and make sure
# the location isn't outside the bounds of the image (can happen from rounding)
y_location = min(int(obj['ymax']), len(self.mask)-1)
x_location = min(int(obj['xmin']+(obj['xmax']-obj['xmin'])/2.0), len(self.mask[0])-1)
# if the person is in a masked location, continue
if self.mask[y_location][x_location] == 0:
continue
self.detected_objects.append(obj)
@@ -226,15 +282,10 @@ class Camera:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# draw the bounding boxes on the screen
for obj in detected_objects:
vis_util.draw_bounding_box_on_image_array(frame,
obj['ymin'],
obj['xmin'],
obj['ymax'],
obj['xmax'],
color='red',
thickness=2,
display_str_list=["{}: {}%".format(obj['name'],int(obj['score']*100))],
use_normalized_coordinates=False)
color = (255,0,0)
cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
(obj['xmax'], obj['ymax']),
color, 2)
for region in self.regions:
color = (255,255,255)
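
For reference, the mask gate added above in isolation, using the same obj keys as the diff (a sketch, assuming the /config mount path used elsewhere in video.py):

```python
# Isolated sketch of the mask gate: a detection is dropped when the
# bottom-center of its bounding box lands on a black (0) mask pixel.
import cv2

mask = cv2.imread("/config/back-mask.bmp", cv2.IMREAD_GRAYSCALE)

def is_masked(obj, mask):
    # clamp to the image bounds (rounding can push the box outside)
    y = min(int(obj['ymax']), mask.shape[0] - 1)                   # feet of the person
    x = min(int(obj['xmin'] + (obj['xmax'] - obj['xmin']) / 2.0),  # horizontal center
            mask.shape[1] - 1)
    return mask[y][x] == 0
```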