cleanup and update readme

blakeblackshear
2019-03-30 07:58:31 -05:00
parent 4476bd8a13
commit e0b9b616ce
12 changed files with 74 additions and 278 deletions

View File

@@ -1,116 +0,0 @@
import datetime
import numpy as np
import cv2
import imutils
from . util import tonumpyarray

# do the actual motion detection
def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion_detected, motion_changed,
                  frame_shape, region_size, region_x_offset, region_y_offset, min_motion_area, mask, debug):
    # shape shared input array into frame for processing
    arr = tonumpyarray(shared_arr).reshape(frame_shape)

    avg_frame = None
    avg_delta = None
    last_motion = -1
    frame_time = 0.0
    motion_frames = 0
    while True:
        now = datetime.datetime.now().timestamp()

        # if it has been long enough since the last motion, clear the flag
        if last_motion > 0 and (now - last_motion) > 5:
            last_motion = -1
            if motion_detected.is_set():
                motion_detected.clear()
                with motion_changed:
                    motion_changed.notify_all()

        with frame_ready:
            # if there isn't a frame ready for processing or it is old, wait for a signal
            if shared_frame_time.value == frame_time or (now - shared_frame_time.value) > 0.5:
                frame_ready.wait()

        # lock and make a copy of the cropped frame
        with frame_lock:
            cropped_frame = arr[region_y_offset:region_y_offset+region_size, region_x_offset:region_x_offset+region_size].copy()
            frame_time = shared_frame_time.value

        # convert to grayscale
        gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)

        # apply image mask to remove areas from motion detection
        gray[mask] = [255]

        # apply gaussian blur
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if avg_frame is None:
            avg_frame = gray.copy().astype("float")
            continue

        # look at the delta from the avg_frame
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))

        if avg_delta is None:
            avg_delta = frameDelta.copy().astype("float")

        # compute the average delta over the past few frames
        # the alpha value can be modified to configure how sensitive the motion detection is:
        # higher values mean the current frame impacts the delta a lot, and a single raindrop may
        # register as motion; too low, and a fast-moving person won't be detected as motion.
        # this also assumes that a person is in the same location across more than a single frame
        cv2.accumulateWeighted(frameDelta, avg_delta, 0.2)

        # compute the threshold image for the current frame
        current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # black out everything in the avg_delta where there isn't motion in the current frame
        avg_delta_image = cv2.convertScaleAbs(avg_delta)
        avg_delta_image[np.where(current_thresh==[0])] = [0]

        # then look for deltas above the threshold, but only in areas where there is a delta
        # in the current frame. this prevents deltas from previous frames from being included
        thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        motion_found = False

        # loop over the contours
        for c in cnts:
            # if the contour is big enough, count it as motion
            contour_area = cv2.contourArea(c)
            if contour_area > min_motion_area:
                motion_found = True
                if debug:
                    cv2.drawContours(cropped_frame, [c], -1, (0, 255, 0), 2)
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.putText(cropped_frame, str(contour_area), (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 100, 0), 2)
                else:
                    break

        if motion_found:
            motion_frames += 1
            # if there have been enough consecutive motion frames, report motion
            if motion_frames >= 3:
                # only average in the current frame if the difference persists for at least 3 frames
                cv2.accumulateWeighted(gray, avg_frame, 0.01)
                motion_detected.set()
                with motion_changed:
                    motion_changed.notify_all()
                last_motion = now
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(gray, avg_frame, 0.01)
            motion_frames = 0

        if debug and motion_frames == 3:
            cv2.imwrite("/lab/debug/motion-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), cropped_frame)
            cv2.imwrite("/lab/debug/avg_delta-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), avg_delta_image)

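The deleted module builds motion detection from two running averages: a slowly updated average frame (alpha 0.01) that absorbs lighting drift, and an averaged delta (alpha 0.2) that suppresses single-frame noise like raindrops. A minimal standalone sketch of that core loop, assuming a hypothetical local video file in place of the project's shared-memory frames and region cropping:

import cv2
import imutils

cap = cv2.VideoCapture("example.mp4")  # hypothetical input, not from this repo
avg_frame = None
min_motion_area = 500  # illustrative threshold

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
    if avg_frame is None:
        # seed the running average with the first frame
        avg_frame = gray.astype("float")
        continue
    # difference between the current frame and the slow-moving average
    frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
    thresh = cv2.threshold(frame_delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = imutils.grab_contours(
        cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    if any(cv2.contourArea(c) > min_motion_area for c in cnts):
        print("motion detected")
    # fold the current frame into the average slowly so gradual scene
    # changes are absorbed without masking real movement
    cv2.accumulateWeighted(gray, avg_frame, 0.01)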
View File

@@ -1,29 +1,6 @@
import json
import threading

class MqttMotionPublisher(threading.Thread):
    def __init__(self, client, topic_prefix, motion_changed, motion_flags):
        threading.Thread.__init__(self)
        self.client = client
        self.topic_prefix = topic_prefix
        self.motion_changed = motion_changed
        self.motion_flags = motion_flags

    def run(self):
        last_sent_motion = ""
        while True:
            with self.motion_changed:
                self.motion_changed.wait()

            # send message for motion
            motion_status = 'OFF'
            if any(obj.is_set() for obj in self.motion_flags):
                motion_status = 'ON'

            if last_sent_motion != motion_status:
                last_sent_motion = motion_status
                self.client.publish(self.topic_prefix+'/motion', motion_status, retain=False)

class MqttObjectPublisher(threading.Thread):
    def __init__(self, client, topic_prefix, objects_parsed, detected_objects):
        threading.Thread.__init__(self)

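MqttMotionPublisher collapses any number of per-region motion flags into a single ON/OFF topic and publishes only on change. A hedged usage sketch assuming the paho-mqtt client implied by the publish() call above; the broker address, topic prefix, and flag wiring are illustrative, not from this diff:

import threading
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("mqtt.example.com")  # hypothetical broker
client.loop_start()

motion_changed = threading.Condition()
motion_flags = [threading.Event(), threading.Event()]  # e.g. one per detection region

publisher = MqttMotionPublisher(client, 'cameras/back', motion_changed, motion_flags)
publisher.start()

# a detector reports motion by setting its flag and notifying the condition;
# the publisher wakes, sees a set flag, and publishes 'ON' exactly once
motion_flags[0].set()
with motion_changed:
    motion_changed.notify_all()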
View File

@@ -36,13 +36,10 @@ class PreppedQueueProcessor(threading.Thread):
        # process queue...
        while True:
            frame = self.prepped_frame_queue.get()
            # print(self.prepped_frame_queue.qsize())

            # Actual detection.
            objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=0.5, top_k=3)
            # time.sleep(0.1)
            # objects = []
            # print(self.engine.get_inference_time())
            # put detected objects in the queue
            # parse and pass detected objects back to the camera
            parsed_objects = []
            for obj in objects:
                box = obj.bounding_box.flatten().tolist()
@@ -99,7 +96,6 @@ class FramePrepper(threading.Thread):
            # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
            frame_expanded = np.expand_dims(cropped_frame_rgb, axis=0)
            # print("Prepped frame at " + str(self.region_x_offset) + "," + str(self.region_y_offset))

            # add the frame to the queue
            if not self.prepped_frame_queue.full():
                self.prepped_frame_queue.put({

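The detection call above uses the legacy edgetpu library's DetectionEngine, whose candidates carry normalized bounding boxes. A sketch of the parse-and-rescale step, assuming that legacy API; the model path and label map are placeholders, and the 300x300 region size matches the [1, 300, 300, 3] input shape noted above:

import numpy as np
from edgetpu.detection.engine import DetectionEngine

engine = DetectionEngine("/path/to/detect.tflite")  # hypothetical model path
labels = {0: "person"}                              # hypothetical label map
region_size = 300

frame = np.zeros((300, 300, 3), dtype=np.uint8).flatten()  # placeholder input tensor
objects = engine.DetectWithInputTensor(frame, threshold=0.5, top_k=3)

parsed_objects = []
for obj in objects:
    # bounding_box holds normalized [[xmin, ymin], [xmax, ymax]] coordinates
    box = obj.bounding_box.flatten().tolist()
    parsed_objects.append({
        'name': labels.get(obj.label_id, 'unknown'),
        'score': float(obj.score),
        # scale normalized coordinates back to the cropped region
        'xmin': int(box[0] * region_size),
        'ymin': int(box[1] * region_size),
        'xmax': int(box[2] * region_size),
        'ymax': int(box[3] * region_size),
    })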
View File

@@ -3,18 +3,6 @@ import datetime
import threading
import cv2
from object_detection.utils import visualization_utils as vis_util
class ObjectParser(threading.Thread):
    def __init__(self, cameras, object_queue, detected_objects, regions):
        threading.Thread.__init__(self)
        self.cameras = cameras
        self.object_queue = object_queue
        self.regions = regions

    def run(self):
        # frame_times = {}
        while True:
            obj = self.object_queue.get()
            self.cameras[obj['camera_name']].add_object(obj)

class ObjectCleaner(threading.Thread):
    def __init__(self, objects_parsed, detected_objects):
@@ -34,7 +22,6 @@ class ObjectCleaner(threading.Thread):
            # (newest objects are appended to the end)
            detected_objects = self._detected_objects.copy()
            #print([round(now-obj['frame_time'],2) for obj in detected_objects])

            num_to_delete = 0
            for obj in detected_objects:
                if now-obj['frame_time']<2:
@@ -69,8 +56,6 @@ class BestPersonFrame(threading.Thread):
            # make a copy of detected objects
            detected_objects = self.detected_objects.copy()
            detected_people = [obj for obj in detected_objects if obj['name'] == 'person']

            # make a copy of the recent frames
            recent_frames = self.recent_frames.copy()

            # get the highest scoring person
            new_best_person = max(detected_people, key=lambda x:x['score'], default=self.best_person)
@@ -89,7 +74,10 @@ class BestPersonFrame(threading.Thread):
            # or the current person is more than 1 minute old, use the new best person
            if new_best_person['score'] > self.best_person['score'] or (now - self.best_person['frame_time']) > 60:
                self.best_person = new_best_person

            # make a copy of the recent frames
            recent_frames = self.recent_frames.copy()
            if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
                best_frame = recent_frames[self.best_person['frame_time']]
                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)

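The selection rule in the hunks above is compact enough to isolate: keep the highest-scoring person, but let a newer detection displace the current best once it is more than a minute old. A standalone sketch with made-up sample data (the function name is illustrative, not from the diff):

import datetime

def pick_best_person(current_best, detected_objects, now):
    people = [o for o in detected_objects if o['name'] == 'person']
    # fall back to the current best when no people are detected
    candidate = max(people, key=lambda o: o['score'], default=current_best)
    if current_best is None or candidate is None:
        return candidate
    # replace on a higher score, or when the current best has gone stale
    if candidate['score'] > current_best['score'] or (now - current_best['frame_time']) > 60:
        return candidate
    return current_best

now = datetime.datetime.now().timestamp()
best = pick_best_person(None, [{'name': 'person', 'score': 0.87, 'frame_time': now}], now)
print(best['score'])  # 0.87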
View File

@@ -8,11 +8,10 @@ import multiprocessing as mp
from object_detection.utils import visualization_utils as vis_util
from . util import tonumpyarray
from . object_detection import FramePrepper
from . objects import ObjectCleaner, ObjectParser, BestPersonFrame
from . objects import ObjectCleaner, BestPersonFrame
from . mqtt import MqttObjectPublisher
# fetch the frames as fast as possible, only decoding the frames when the
# detection_process has consumed the current frame
# fetch the frames as fast as possible and store current frame in a shared memory array
def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url):
    # convert shared memory array into numpy and shape into image array
    arr = tonumpyarray(shared_arr).reshape(frame_shape)
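fetch_frames relies on a flat shared-memory array that each process reshapes into an image via tonumpyarray. A sketch of that pattern; the frame shape, dtype, and the tonumpyarray body are assumptions inferred from this usage, not copied from the project's util module:

import ctypes
import multiprocessing as mp
import numpy as np

frame_shape = (1080, 1920, 3)  # hypothetical camera resolution

def tonumpyarray(mp_arr):
    # view the shared buffer as a numpy array without copying
    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)

flat_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
shared_arr = mp.Array(ctypes.c_uint8, flat_size)
shared_frame_time = mp.Value('d', 0.0)

# any process holding these handles sees the same pixels
arr = tonumpyarray(shared_arr).reshape(frame_shape)
arr[:] = 0  # e.g. the capture process writes a decoded frame here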