forked from GitHub/frigate
Compare commits
22 Commits
v0.5.2...v0.6.0-rc1
| Author | SHA1 | Date |
|---|---|---|
| | 469259d663 | |
| | f3db69d975 | |
| | 0914cb71ad | |
| | 0ae2806eb4 | |
| | adcfe699c2 | |
| | e5048f98b6 | |
| | e6c6338266 | |
| | 1f03c8cb8c | |
| | 69f5249788 | |
| | 3a1f1c946b | |
| | d88745af6e | |
| | 709d917f0c | |
| | 918386bdc1 | |
| | a8c0fadf95 | |
| | 6dc7b8f246 | |
| | 71f6f0bee4 | |
| | a00afb61c0 | |
| | 5dbe6c5f36 | |
| | 16732aa5b3 | |
| | 3d2f1437e4 | |
| | fbe721c860 | |
| | 7383db60b0 | |
```diff
@@ -24,6 +24,7 @@ RUN apt -qq update && apt -qq install --no-install-recommends -y \
     numpy \
     imutils \
     scipy \
+    psutil \
     && python3.7 -m pip install -U \
     Flask \
     paho-mqtt \
@@ -49,6 +50,8 @@ RUN wget -q https://dl.google.com/coral/canned_models/coco_labels.txt -O /labelmap.txt
 RUN wget -q https://github.com/google-coral/edgetpu/raw/master/test_data/ssd_mobilenet_v2_coco_quant_postprocess.tflite -O /cpu_model.tflite
 
+RUN mkdir /cache /clips
+
 WORKDIR /opt/frigate/
 ADD frigate frigate/
 COPY detect_objects.py .
```
README.md
````diff
@@ -42,6 +42,7 @@ Example docker-compose:
       - /dev/bus/usb:/dev/bus/usb
       - /etc/localtime:/etc/localtime:ro
       - <path_to_config>:/config
+      - <path_to_directory_for_clips>:/clips
     ports:
       - "5000:5000"
     environment:
@@ -128,29 +129,125 @@ automation:
       - url: http://<ip>:5000/<camera_name>/person/best.jpg
         caption: A person was detected.
 ```
 
-## Debugging Endpoint
+## HTTP Endpoints
+A web server is available on port 5000 with the following endpoints.
 
-Keep in mind the MJPEG endpoint is for debugging only, but should not be used continuously as it will put additional load on the system.
-
-Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type with at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
+### `/<camera_name>`
+An mjpeg stream for debugging. Keep in mind the mjpeg endpoint is for debugging only and will put additional load on the system when in use.
+
+You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/back?fps=10` or both with `?fps=10&h=1000`.
+
+Debug info is available at `http://localhost:5000/debug/stats`
+
+### `/<camera_name>/<object_name>/best.jpg`
+The best snapshot for any object type. It is a full resolution image by default. You can change the size of the image by appending `h=height-in-pixels` to the endpoint.
+
+### `/<camera_name>/latest.jpg`
+The most recent frame that frigate has finished processing. It is a full resolution image by default. You can change the size of the image by appending `h=height-in-pixels` to the endpoint.
+
-## Using a custom model
+### `/debug/stats`
+Contains some granular debug info that can be used for sensors in HomeAssistant.
````
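The resize parameters make these endpoints easy to poll from scripts. A minimal sketch with `requests`, assuming frigate is reachable at `localhost:5000` and has a camera named `back` (both assumptions for illustration):

```python
import requests

BASE = "http://localhost:5000"  # assumed frigate host/port

# best person snapshot, resized to 480px tall via the `h` query param
resp = requests.get(f"{BASE}/back/person/best.jpg", params={"h": 480}, timeout=5)
resp.raise_for_status()
with open("best_person.jpg", "wb") as f:
    f.write(resp.content)

# granular debug stats, e.g. for HomeAssistant sensors
print(requests.get(f"{BASE}/debug/stats", timeout=5).json())
```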
````diff
+## MQTT Messages
+These are the MQTT messages generated by Frigate. The default topic_prefix is `frigate`, but can be changed in the config file.
+
+### frigate/available
+Designed to be used as an availability topic with HomeAssistant. Possible messages are:
+"online": published when frigate is running (on startup)
+"offline": published right before frigate stops
+
+### frigate/<camera_name>/<object_name>
+Publishes `ON` or `OFF` and is designed to be used as a binary sensor in HomeAssistant for whether or not that object type is detected.
+
+### frigate/<camera_name>/<object_name>/snapshot
+Publishes a jpeg encoded frame of the detected object type. When the object is no longer detected, the highest confidence image is published or the original image is published again.
````
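These topics are straightforward to consume outside of HomeAssistant too. A minimal sketch with `paho-mqtt`, assuming a broker on `localhost` and the default `frigate` topic prefix (both assumptions):

```python
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    client.subscribe("frigate/+/person")           # ON/OFF binary sensor updates
    client.subscribe("frigate/+/person/snapshot")  # jpeg snapshots

def on_message(client, userdata, msg):
    if msg.topic.endswith("/snapshot"):
        print(f"{msg.topic}: {len(msg.payload)} byte jpeg")
    else:
        print(f"{msg.topic}: {msg.payload.decode()}")  # ON or OFF

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883)
client.loop_forever()
```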
````diff
+### frigate/<camera_name>/events/start
+Message published at the start of any tracked object. JSON looks as follows:
+```json
+{
+  "label": "person",
+  "score": 0.7890625,
+  "box": [
+    468,
+    446,
+    550,
+    592
+  ],
+  "area": 11972,
+  "region": [
+    403,
+    395,
+    613,
+    605
+  ],
+  "frame_time": 1594298020.819046,
+  "centroid": [
+    509,
+    519
+  ],
+  "id": "1594298020.819046-0",
+  "start_time": 1594298020.819046,
+  "top_score": 0.7890625,
+  "history": [
+    {
+      "score": 0.7890625,
+      "box": [
+        468,
+        446,
+        550,
+        592
+      ],
+      "region": [
+        403,
+        395,
+        613,
+        605
+      ],
+      "centroid": [
+        509,
+        519
+      ],
+      "frame_time": 1594298020.819046
+    }
+  ]
+}
+```
+
+### frigate/<camera_name>/events/end
+Same as `frigate/<camera_name>/events/start`, but with an `end_time` property as well.
````
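Since the payload is plain JSON, event consumers stay small. A sketch that measures how long each object was tracked, assuming the same broker and topic prefix as above:

```python
import json
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    event = json.loads(msg.payload)
    if msg.topic.endswith("/events/end"):
        duration = event["end_time"] - event["start_time"]
        print(f"{event['label']} {event['id']}: tracked for {duration:.1f}s, "
              f"top score {event['top_score']:.2f}")

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # assumed broker
client.subscribe("frigate/+/events/+")
client.loop_forever()
```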
````diff
+### frigate/<zone_name>/<object_name>
+Publishes `ON` or `OFF` and is designed to be used as a binary sensor in HomeAssistant for whether or not that object type is detected in the zone.
+
+## Using a custom model or labels
+Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use your own models with volume mounts:
+- CPU Model: `/cpu_model.tflite`
+- EdgeTPU Model: `/edgetpu_model.tflite`
+- Labels: `/labelmap.txt`
+
+### Customizing the Labelmap
+The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular, such as car/truck. You must retain the same number of labels, but you can change the names. To change:
+
+- Download the [COCO labelmap](https://dl.google.com/coral/canned_models/coco_labels.txt)
+- Modify the label names as desired. For example, change `7 truck` to `7 car`
+- Mount the new file at `/labelmap.txt` in the container with an additional volume
+```
+-v ./config/labelmap.txt:/labelmap.txt
+```
+
 ## Masks and limiting detection to a certain area
 You can create a *bitmap (bmp)* file the same aspect ratio as your camera feed to limit detection to certain areas. The mask works by looking at the bottom center of any bounding box (first image, red dot below) and comparing that to your mask. If that red dot falls on an area of your mask that is black, the detection (and motion) will be ignored. The mask in the second image would limit detection on this camera to only objects that are in the front yard and not the street.
 
-<a href="docs/example-mask-check-point.png"><img src="docs/example-mask-check-point.png" height="300"></a>
-<a href="docs/example-mask.bmp"><img src="docs/example-mask.bmp" height="300"></a>
-<a href="docs/example-mask-overlay.png"><img src="docs/example-mask-overlay.png" height="300"></a>
+<img src="docs/example-mask-check-point.png" height="300">
+<img src="docs/example-mask.bmp" height="300">
+<img src="docs/example-mask-overlay.png" height="300">
````
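The mask check itself is easy to reason about: frigate samples a single pixel. A standalone sketch of the idea, assuming a grayscale mask loaded at the camera's resolution (the file path is hypothetical):

```python
import cv2

mask = cv2.imread("config/mask.bmp", cv2.IMREAD_GRAYSCALE)  # hypothetical path

def ignored_by_mask(box, mask):
    """box is (x_min, y_min, x_max, y_max); frigate checks the bottom center."""
    x_center = (box[0] + box[2]) // 2
    y_bottom = box[3]
    # a black pixel (0) in the mask means detections at this point are ignored
    return mask[y_bottom, x_center] == 0

print(ignored_by_mask((468, 446, 550, 592), mask))
```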
````diff
+## Zones
+Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area. See the sample config for details on how to configure.
+
+During testing, `draw_zones` can be set in the config to tell frigate to draw the zone on the frames so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.
+
+![Zone Example](docs/zone_example.jpg)
+
 ## Tips
 - Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed. Not as effective, but you can also modify the `take_frame` [configuration](config/config.example.yml) for each camera to only analyze every other frame, or every third frame, etc.
````
```diff
@@ -66,7 +66,42 @@ objects:
   person:
     min_area: 5000
     max_area: 100000
-    threshold: 0.5
+    threshold: 0.8
+
+zones:
+  ################
+  # Name of the zone
+  ################
+  front_steps:
+    cameras:
+      front_door:
+        ####################
+        # For each camera, a list of x,y coordinates to define the polygon of the zone. The top
+        # left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
+        # The same zone can exist across multiple cameras if they have overlapping FOVs.
+        # An object is determined to be in the zone based on whether or not the bottom center
+        # of its bounding box is within the polygon. The polygon must have at least 3 points.
+        # Coordinates can be generated at https://www.image-map.net/
+        ####################
+        coordinates:
+          - 545,1077
+          - 747,939
+          - 788,805
+    ################
+    # Zone level object filters. These are applied in addition to the global and camera filters
+    # and should be more restrictive than the global and camera filters. The global and camera
+    # filters are applied upstream.
+    ################
+    filters:
+      person:
+        min_area: 5000
+        max_area: 100000
+        threshold: 0.8
+  driveway:
+    cameras:
+      front_door:
+        coordinates: 545,1077,747,939,788,805
+  yard:
+    cameras:
+      back:
```
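The two accepted coordinate forms describe the same polygon. A standalone sketch of parsing them and testing an object's bottom center with OpenCV, mirroring the logic added in `frigate/object_processing.py` further down (sample coordinates taken from the config above):

```python
import cv2
import numpy as np

def parse_coordinates(coordinates):
    # list form: ["545,1077", "747,939", "788,805"]
    if isinstance(coordinates, list):
        pts = [[int(x) for x in p.split(',')] for p in coordinates]
    # flat string form: "545,1077,747,939,788,805"
    else:
        flat = [int(x) for x in coordinates.split(',')]
        pts = [flat[i:i + 2] for i in range(0, len(flat), 2)]
    return np.array(pts, dtype=np.int32)

contour = parse_coordinates("545,1077,747,939,788,805")
# an object is "in the zone" when the bottom center of its box is inside the polygon
print(cv2.pointPolygonTest(contour, (700, 950), False) >= 0)
```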
```diff
@@ -91,7 +126,9 @@ cameras:
     # width: 720
 
     ################
-    ## Optional mask. Must be the same aspect ratio as your video feed.
+    ## Optional mask. Must be the same aspect ratio as your video feed. Value is either the
+    ## name of a file in the config directory or a base64 encoded bmp image prefixed with
+    ## 'base64,' e.g. 'base64,asfasdfasdf....'.
     ##
     ## The mask works by looking at the bottom center of the bounding box for the detected
     ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
@@ -110,11 +147,34 @@ cameras:
     ################
     take_frame: 1
 
+    ################
+    # This will save a clip for each tracked object by frigate along with a json file that contains
+    # data related to the tracked object. This works by telling ffmpeg to write video segments to /cache
+    # from the video stream without re-encoding. Clips are then created by using ffmpeg to merge segments
+    # without re-encoding. The segments saved are unaltered from what frigate receives to avoid re-encoding.
+    # They do not contain bounding boxes. 30 seconds of video is added to the start of the clip. These are
+    # optimized to capture "false_positive" examples for improving frigate.
+    #
+    # NOTE: This will only work for camera feeds that can be copied into the mp4 container format without
+    # encoding such as h264. I do not expect this to work for mjpeg streams, and it may not work for many other
+    # types of streams.
+    #
+    # WARNING: Videos in /cache are retained until there are no ongoing events. If you are tracking cars or
+    # other objects for long periods of time, the cache will continue to grow indefinitely.
+    ################
+    save_clips:
+      enabled: False
+      #########
+      # Number of seconds before the event to include in the clips
+      #########
+      pre_capture: 30
+
     ################
     # Configuration for the snapshots in the debug view and mqtt
     ################
     snapshots:
       show_timestamp: True
+      draw_zones: False
 
     ################
     # Camera level object config. This config is merged with the global config above.
@@ -126,4 +186,4 @@ cameras:
       person:
         min_area: 5000
         max_area: 100000
-        threshold: 0.5
+        threshold: 0.8
```
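The clip assembly described in the `save_clips` comments works in two stages: ffmpeg writes 10 second segments to /cache (see the output args added in the main script below), and on event end the relevant segments are merged with ffmpeg's concat demuxer. A sketch of that merge step with hypothetical segment names, mirroring `create_clip` in the new `frigate/events.py`:

```python
import subprocess as sp

# hypothetical cached segments covering the event window
playlist = "\n".join([
    "file '/cache/back-20200709120000.mp4'",
    "inpoint 25",   # start partway into the first segment
    "file '/cache/back-20200709120010.mp4'",
    "file '/cache/back-20200709120020.mp4'",
    "outpoint 5",   # stop partway into the last segment
])
sp.run(
    ["ffmpeg", "-y", "-protocol_whitelist", "pipe,file",
     "-f", "concat", "-safe", "0", "-i", "-",
     "-c", "copy", "/clips/back-example.mp4"],
    input=playlist, encoding="ascii",
)
```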
```diff
@@ -1,4 +1,5 @@
 import os
+import signal
 import sys
 import traceback
 import signal
@@ -17,6 +18,7 @@ import paho.mqtt.client as mqtt
 
 from frigate.video import track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
 from frigate.object_processing import TrackedObjectProcessor
+from frigate.events import EventProcessor
 from frigate.util import EventsPerSecond
 from frigate.edgetpu import EdgeTPUProcess
@@ -47,7 +49,6 @@ FFMPEG_DEFAULT_CONFIG = {
         '-flags', 'low_delay',
         '-strict', 'experimental',
         '-fflags', '+genpts+discardcorrupt',
-        '-vsync', 'drop',
         '-rtsp_transport', 'tcp',
         '-stimeout', '5000000',
         '-use_wallclock_as_timestamps', '1']),
@@ -71,13 +72,14 @@ def start_plasma_store():
     return plasma_process
 
 class CameraWatchdog(threading.Thread):
-    def __init__(self, camera_processes, config, tflite_process, tracked_objects_queue, plasma_process):
+    def __init__(self, camera_processes, config, tflite_process, tracked_objects_queue, plasma_process, stop_event):
         threading.Thread.__init__(self)
         self.camera_processes = camera_processes
         self.config = config
         self.tflite_process = tflite_process
         self.tracked_objects_queue = tracked_objects_queue
         self.plasma_process = plasma_process
+        self.stop_event = stop_event
 
     def run(self):
         time.sleep(10)
@@ -85,6 +87,10 @@ class CameraWatchdog(threading.Thread):
             # wait a bit before checking
             time.sleep(10)
 
+            if self.stop_event.is_set():
+                print(f"Exiting watchdog...")
+                break
+
             now = datetime.datetime.now().timestamp()
 
             # check the plasma process
@@ -125,7 +131,7 @@ class CameraWatchdog(threading.Thread):
                 frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
                 ffmpeg_process = start_or_restart_ffmpeg(camera_process['ffmpeg_cmd'], frame_size)
                 camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, camera_process['frame_queue'],
-                    camera_process['take_frame'], camera_process['camera_fps'], camera_process['detection_frame'])
+                    camera_process['take_frame'], camera_process['camera_fps'], camera_process['detection_frame'], self.stop_event)
                 camera_capture.start()
                 camera_process['ffmpeg_process'] = ffmpeg_process
                 camera_process['capture_thread'] = camera_capture
@@ -142,6 +148,7 @@ class CameraWatchdog(threading.Thread):
         ffmpeg_process.communicate()
 
 def main():
+    stop_event = threading.Event()
     # connect to mqtt and setup last will
     def on_connect(client, userdata, flags, rc):
         print("On connect called")
@@ -171,11 +178,15 @@ def main():
     ##
     for name, config in CONFIG['cameras'].items():
         config['snapshots'] = {
-            'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True)
+            'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
+            'draw_zones': config.get('snapshots', {}).get('draw_zones', False)
         }
 
     # Queue for cameras to push tracked objects to
-    tracked_objects_queue = mp.SimpleQueue()
+    tracked_objects_queue = mp.Queue()
+
+    # Queue for clip processing
+    event_queue = mp.Queue()
 
     # Start the shared tflite process
     tflite_process = EdgeTPUProcess()
@@ -190,6 +201,25 @@ def main():
         ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
         ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
         ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
+        if config.get('save_clips', {}).get('enabled', False):
+            ffmpeg_output_args = [
+                "-f",
+                "segment",
+                "-segment_time",
+                "10",
+                "-segment_format",
+                "mp4",
+                "-reset_timestamps",
+                "1",
+                "-strftime",
+                "1",
+                "-c",
+                "copy",
+                "-an",
+                "-map",
+                "0",
+                f"/cache/{name}-%Y%m%d%H%M%S.mp4"
+            ] + ffmpeg_output_args
         ffmpeg_cmd = (['ffmpeg'] +
                 ffmpeg_global_args +
                 ffmpeg_hwaccel_args +
@@ -209,10 +239,10 @@ def main():
         detection_frame = mp.Value('d', 0.0)
 
         ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
-        frame_queue = mp.SimpleQueue()
+        frame_queue = mp.Queue()
         camera_fps = EventsPerSecond()
         camera_fps.start()
-        camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, frame_queue, take_frame, camera_fps, detection_frame)
+        camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, frame_queue, take_frame, camera_fps, detection_frame, stop_event)
         camera_capture.start()
 
         camera_processes[name] = {
@@ -240,12 +270,31 @@ def main():
         camera_process['process'].start()
         print(f"Camera_process started for {name}: {camera_process['process'].pid}")
 
-    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue)
+    event_processor = EventProcessor(CONFIG['cameras'], camera_processes, '/cache', '/clips', event_queue, stop_event)
+    event_processor.start()
+
+    object_processor = TrackedObjectProcessor(CONFIG['cameras'], CONFIG.get('zones', {}), client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
     object_processor.start()
 
-    camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process, tracked_objects_queue, plasma_process)
+    camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process, tracked_objects_queue, plasma_process, stop_event)
     camera_watchdog.start()
 
+    def receiveSignal(signalNumber, frame):
+        print('Received:', signalNumber)
+        stop_event.set()
+        event_processor.join()
+        object_processor.join()
+        camera_watchdog.join()
+        for name, camera_process in camera_processes.items():
+            camera_process['capture_thread'].join()
+        rc = camera_watchdog.plasma_process.poll()
+        if rc == None:
+            camera_watchdog.plasma_process.terminate()
+        sys.exit()
+
+    signal.signal(signal.SIGTERM, receiveSignal)
+    signal.signal(signal.SIGINT, receiveSignal)
+
     # create a flask app that encodes frames a mjpeg on demand
     app = Flask(__name__)
     log = logging.getLogger('werkzeug')
@@ -315,6 +364,11 @@ def main():
         best_frame = object_processor.get_best(camera_name, label)
         if best_frame is None:
             best_frame = np.zeros((720,1280,3), np.uint8)
+
+        height = int(request.args.get('h', str(best_frame.shape[0])))
+        width = int(height*best_frame.shape[1]/best_frame.shape[0])
+
+        best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
         best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
         ret, jpg = cv2.imencode('.jpg', best_frame)
         response = make_response(jpg.tobytes())
@@ -334,6 +388,27 @@ def main():
         else:
             return "Camera named {} not found".format(camera_name), 404
 
+    @app.route('/<camera_name>/latest.jpg')
+    def latest_frame(camera_name):
+        if camera_name in CONFIG['cameras']:
+            frame = object_processor.get_current_frame(camera_name)
+            if frame is None:
+                frame = np.zeros((720,1280,3), np.uint8)
+
+            height = int(request.args.get('h', str(frame.shape[0])))
+            width = int(height*frame.shape[1]/frame.shape[0])
+
+            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+
+            ret, jpg = cv2.imencode('.jpg', frame)
+            response = make_response(jpg.tobytes())
+            response.headers['Content-Type'] = 'image/jpg'
+            return response
+        else:
+            return "Camera named {} not found".format(camera_name), 404
+
     def imagestream(camera_name, fps, height):
         while True:
             # max out at specified FPS
```
BIN docs/zone_example.jpg (new file, 73 KiB; binary file not shown)
```diff
@@ -87,7 +87,7 @@ def run_detector(detection_queue, avg_speed, start):
 
 class EdgeTPUProcess():
     def __init__(self):
-        self.detection_queue = mp.SimpleQueue()
+        self.detection_queue = mp.Queue()
         self.avg_inference_speed = mp.Value('d', 0.01)
         self.detection_start = mp.Value('d', 0.0)
         self.detect_process = None
```
frigate/events.py (new file, 158 lines)
```diff
@@ -0,0 +1,158 @@
+import os
+import time
+import psutil
+import threading
+from collections import defaultdict
+import json
+import datetime
+import subprocess as sp
+import queue
+
+class EventProcessor(threading.Thread):
+    def __init__(self, config, camera_processes, cache_dir, clip_dir, event_queue, stop_event):
+        threading.Thread.__init__(self)
+        self.config = config
+        self.camera_processes = camera_processes
+        self.cache_dir = cache_dir
+        self.clip_dir = clip_dir
+        self.cached_clips = {}
+        self.event_queue = event_queue
+        self.events_in_process = {}
+        self.stop_event = stop_event
+
+    def refresh_cache(self):
+        cached_files = os.listdir(self.cache_dir)
+
+        files_in_use = []
+        for process_data in self.camera_processes.values():
+            try:
+                ffmpeg_process = psutil.Process(pid=process_data['ffmpeg_process'].pid)
+                flist = ffmpeg_process.open_files()
+                if flist:
+                    for nt in flist:
+                        if nt.path.startswith(self.cache_dir):
+                            files_in_use.append(nt.path.split('/')[-1])
+            except:
+                continue
+
+        for f in cached_files:
+            if f in files_in_use or f in self.cached_clips:
+                continue
+
+            camera = '-'.join(f.split('-')[:-1])
+            start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
+
+            ffprobe_cmd = " ".join([
+                'ffprobe',
+                '-v',
+                'error',
+                '-show_entries',
+                'format=duration',
+                '-of',
+                'default=noprint_wrappers=1:nokey=1',
+                f"{os.path.join(self.cache_dir,f)}"
+            ])
+            p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+            (output, err) = p.communicate()
+            p_status = p.wait()
+            if p_status == 0:
+                duration = float(output.decode('utf-8').strip())
+            else:
+                print(f"bad file: {f}")
+                os.remove(os.path.join(self.cache_dir,f))
+                continue
+
+            self.cached_clips[f] = {
+                'path': f,
+                'camera': camera,
+                'start_time': start_time.timestamp(),
+                'duration': duration
+            }
+
+        if len(self.events_in_process) > 0:
+            earliest_event = min(self.events_in_process.values(), key=lambda x:x['start_time'])['start_time']
+        else:
+            earliest_event = datetime.datetime.now().timestamp()
+
+        for f, data in list(self.cached_clips.items()):
+            if earliest_event-90 > data['start_time']+data['duration']:
+                del self.cached_clips[f]
+                os.remove(os.path.join(self.cache_dir,f))
+
+    def create_clip(self, camera, event_data, pre_capture):
+        # get all clips from the camera with the event sorted
+        sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
+
+        while sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']:
+            time.sleep(5)
+            self.refresh_cache()
+            # get all clips from the camera with the event sorted
+            sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
+
+        playlist_start = event_data['start_time']-pre_capture
+        playlist_end = event_data['end_time']+5
+        playlist_lines = []
+        for clip in sorted_clips:
+            # clip ends before playlist start time, skip
+            if clip['start_time']+clip['duration'] < playlist_start:
+                continue
+            # clip starts after playlist ends, finish
+            if clip['start_time'] > playlist_end:
+                break
+            playlist_lines.append(f"file '{os.path.join(self.cache_dir,clip['path'])}'")
+            # if this is the starting clip, add an inpoint
+            if clip['start_time'] < playlist_start:
+                playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
+            # if this is the ending clip, add an outpoint
+            if clip['start_time']+clip['duration'] > playlist_end:
+                playlist_lines.append(f"outpoint {int(playlist_end-clip['start_time'])}")
+
+        clip_name = f"{camera}-{event_data['id']}"
+        ffmpeg_cmd = [
+            'ffmpeg',
+            '-y',
+            '-protocol_whitelist',
+            'pipe,file',
+            '-f',
+            'concat',
+            '-safe',
+            '0',
+            '-i',
+            '-',
+            '-c',
+            'copy',
+            f"{os.path.join(self.clip_dir, clip_name)}.mp4"
+        ]
+
+        p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
+        if p.returncode != 0:
+            print(p.stderr)
+            return
+
+        with open(f"{os.path.join(self.clip_dir, clip_name)}.json", 'w') as outfile:
+            json.dump(event_data, outfile)
+
+    def run(self):
+        while True:
+            if self.stop_event.is_set():
+                print(f"Exiting event processor...")
+                break
+
+            try:
+                event_type, camera, event_data = self.event_queue.get(timeout=10)
+            except queue.Empty:
+                if not self.stop_event.is_set():
+                    self.refresh_cache()
+                continue
+
+            self.refresh_cache()
+
+            if event_type == 'start':
+                self.events_in_process[event_data['id']] = event_data
+
+            if event_type == 'end':
+                if self.config[camera].get('save_clips', {}).get('enabled', False) and len(self.cached_clips) > 0:
+                    self.create_clip(camera, event_data, self.config[camera].get('save_clips', {}).get('pre_capture', 30))
+                del self.events_in_process[event_data['id']]
```
```diff
@@ -5,6 +5,7 @@ import time
 import copy
 import cv2
 import threading
+import queue
 import numpy as np
 from collections import Counter, defaultdict
 import itertools
@@ -22,13 +23,44 @@ COLOR_MAP = {}
 for key, val in LABELS.items():
     COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
 
+def filter_false_positives(event):
+    if len(event['history']) < 2:
+        return True
+    return False
+
+def zone_filtered(obj, object_config):
+    object_name = obj['label']
+    object_filters = object_config.get('filters', {})
+
+    if object_name in object_filters:
+        obj_settings = object_filters[object_name]
+
+        # if the min area is larger than the
+        # detected object, don't add it to detected objects
+        if obj_settings.get('min_area',-1) > obj['area']:
+            return True
+
+        # if the detected object is larger than the
+        # max area, don't add it to detected objects
+        if obj_settings.get('max_area', 24000000) < obj['area']:
+            return True
+
+        # if the score is lower than the threshold, skip
+        if obj_settings.get('threshold', 0) > obj['score']:
+            return True
+
+    return False
+
 class TrackedObjectProcessor(threading.Thread):
-    def __init__(self, config, client, topic_prefix, tracked_objects_queue):
+    def __init__(self, camera_config, zone_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
         threading.Thread.__init__(self)
-        self.config = config
+        self.camera_config = camera_config
+        self.zone_config = zone_config
         self.client = client
         self.topic_prefix = topic_prefix
         self.tracked_objects_queue = tracked_objects_queue
+        self.event_queue = event_queue
+        self.stop_event = stop_event
         self.camera_data = defaultdict(lambda: {
             'best_objects': {},
             'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
@@ -37,7 +69,29 @@ class TrackedObjectProcessor(threading.Thread):
             'current_frame_time': 0.0,
             'object_id': None
         })
-        self.plasma_client = PlasmaManager()
+        self.zone_data = defaultdict(lambda: {
+            'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
+            'contours': {}
+        })
+
+        # create zone contours
+        for name, config in zone_config.items():
+            for camera, camera_zone_config in config.items():
+                coordinates = camera_zone_config['coordinates']
+                if isinstance(coordinates, list):
+                    self.zone_data[name]['contours'][camera] = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
+                elif isinstance(coordinates, str):
+                    points = coordinates.split(',')
+                    self.zone_data[name]['contours'][camera] = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+                else:
+                    print(f"Unable to parse zone coordinates for {name} - {camera}")
+
+        # set colors for zones
+        colors = plt.cm.get_cmap('tab10', len(self.zone_data.keys()))
+        for i, zone in enumerate(self.zone_data.values()):
+            zone['color'] = tuple(int(round(255 * c)) for c in colors(i)[:3])
+
+        self.plasma_client = PlasmaManager(self.stop_event)
 
     def get_best(self, camera, label):
         if label in self.camera_data[camera]['best_objects']:
@@ -50,14 +104,61 @@ class TrackedObjectProcessor(threading.Thread):
 
     def run(self):
         while True:
-            camera, frame_time, tracked_objects = self.tracked_objects_queue.get()
+            if self.stop_event.is_set():
+                print(f"Exiting object processor...")
+                break
 
-            config = self.config[camera]
+            try:
+                camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)
+            except queue.Empty:
+                continue
+
+            camera_config = self.camera_config[camera]
             best_objects = self.camera_data[camera]['best_objects']
             current_object_status = self.camera_data[camera]['object_status']
-            self.camera_data[camera]['tracked_objects'] = tracked_objects
+            tracked_objects = self.camera_data[camera]['tracked_objects']
+
+            current_ids = current_tracked_objects.keys()
+            previous_ids = tracked_objects.keys()
+            removed_ids = list(set(previous_ids).difference(current_ids))
+            new_ids = list(set(current_ids).difference(previous_ids))
+            updated_ids = list(set(current_ids).intersection(previous_ids))
+
+            for id in new_ids:
+                # only register the object here if we are sure it isnt a false positive
+                if not filter_false_positives(current_tracked_objects[id]):
+                    tracked_objects[id] = current_tracked_objects[id]
+                    # publish events to mqtt
+                    self.client.publish(f"{self.topic_prefix}/{camera}/events/start", json.dumps(tracked_objects[id]), retain=False)
+                    self.event_queue.put(('start', camera, tracked_objects[id]))
+
+            for id in updated_ids:
+                tracked_objects[id] = current_tracked_objects[id]
+
+            for id in removed_ids:
+                # publish events to mqtt
+                tracked_objects[id]['end_time'] = frame_time
+                self.client.publish(f"{self.topic_prefix}/{camera}/events/end", json.dumps(tracked_objects[id]), retain=False)
+                self.event_queue.put(('end', camera, tracked_objects[id]))
+                del tracked_objects[id]
+
+            self.camera_data[camera]['current_frame_time'] = frame_time
+
+            # build a dict of objects in each zone for current camera
+            current_objects_in_zones = defaultdict(lambda: [])
+            for obj in tracked_objects.values():
+                bottom_center = (obj['centroid'][0], obj['box'][3])
+                # check each zone
+                for name, zone in self.zone_data.items():
+                    current_contour = zone['contours'].get(camera, None)
+                    # if the current camera does not have a contour for this zone, skip
+                    if current_contour is None:
+                        continue
+                    # check if the object is in the zone and not filtered
+                    if (cv2.pointPolygonTest(current_contour, bottom_center, False) >= 0
+                        and not zone_filtered(obj, self.zone_config[name][camera].get('filters', {}))):
+                        current_objects_in_zones[name].append(obj['label'])
 
             ###
             # Draw tracked objects on the frame
             ###
@@ -80,10 +181,16 @@ class TrackedObjectProcessor(threading.Thread):
                 region = obj['region']
                 cv2.rectangle(current_frame, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
 
-            if config['snapshots']['show_timestamp']:
+            if camera_config['snapshots']['show_timestamp']:
                 time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
                 cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
 
+            if camera_config['snapshots']['draw_zones']:
+                for name, zone in self.zone_data.items():
+                    thickness = 2 if len(current_objects_in_zones[name]) == 0 else 8
+                    if camera in zone['contours']:
+                        cv2.drawContours(current_frame, [zone['contours'][camera]], -1, zone['color'], thickness)
+
             ###
             # Set the current frame
             ###
@@ -108,6 +215,12 @@ class TrackedObjectProcessor(threading.Thread):
                     if obj['score'] > best_objects[obj['label']]['score'] or (now - best_objects[obj['label']]['frame_time']) > 60:
                         obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                         best_objects[obj['label']] = obj
+                        # send updated snapshot over mqtt
+                        best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_RGB2BGR)
+                        ret, jpg = cv2.imencode('.jpg', best_frame)
+                        if ret:
+                            jpg_bytes = jpg.tobytes()
+                            self.client.publish(f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot", jpg_bytes, retain=True)
                 else:
                     obj['frame'] = np.copy(self.camera_data[camera]['current_frame'])
                     best_objects[obj['label']] = obj
@@ -115,11 +228,28 @@ class TrackedObjectProcessor(threading.Thread):
             ###
             # Report over MQTT
             ###
-            # count objects with more than 2 entries in history by type
+
+            # get the zones that are relevant for this camera
+            relevant_zones = [zone for zone, config in self.zone_config.items() if camera in config]
+            for zone in relevant_zones:
+                # create the set of labels in the current frame and previously reported
+                labels_for_zone = set(current_objects_in_zones[zone] + list(self.zone_data[zone]['object_status'][camera].keys()))
+                # for each label
+                for label in labels_for_zone:
+                    # compute the current 'ON' vs 'OFF' status by checking if any camera sees the object in the zone
+                    previous_state = any([c[label] == 'ON' for c in self.zone_data[zone]['object_status'].values()])
+                    self.zone_data[zone]['object_status'][camera][label] = 'ON' if label in current_objects_in_zones[zone] else 'OFF'
+                    new_state = any([c[label] == 'ON' for c in self.zone_data[zone]['object_status'].values()])
+                    # if the value is changing, send over MQTT
+                    if previous_state == False and new_state == True:
+                        self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'ON', retain=False)
+                    elif previous_state == True and new_state == False:
+                        self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'OFF', retain=False)
+
+            # count by type
             obj_counter = Counter()
             for obj in tracked_objects.values():
-                if len(obj['history']) > 1:
-                    obj_counter[obj['label']] += 1
+                obj_counter[obj['label']] += 1
 
             # report on detected objects
             for obj_name, count in obj_counter.items():
```
```diff
@@ -5,6 +5,8 @@ import cv2
 import itertools
 import copy
 import numpy as np
+import random
+import string
 import multiprocessing as mp
 from collections import defaultdict
 from scipy.spatial import distance as dist
@@ -17,8 +19,10 @@ class ObjectTracker():
         self.max_disappeared = max_disappeared
 
     def register(self, index, obj):
-        id = f"{obj['frame_time']}-{index}"
+        rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
+        id = f"{obj['frame_time']}-{rand_id}"
         obj['id'] = id
         obj['start_time'] = obj['frame_time']
+        obj['top_score'] = obj['score']
         self.add_history(obj)
         self.tracked_objects[id] = obj
@@ -45,6 +49,9 @@ class ObjectTracker():
         }
         if 'history' in obj:
             obj['history'].append(entry)
+            # only maintain the last 20 in history
+            if len(obj['history']) > 20:
+                obj['history'] = obj['history'][-20:]
         else:
             obj['history'] = [entry]
```
```diff
@@ -140,11 +140,14 @@ def listen():
     signal.signal(signal.SIGUSR1, print_stack)
 
 class PlasmaManager:
-    def __init__(self):
+    def __init__(self, stop_event=None):
+        self.stop_event = stop_event
         self.connect()
 
     def connect(self):
         while True:
+            if self.stop_event != None and self.stop_event.is_set():
+                return
             try:
                 self.plasma_client = plasma.connect("/tmp/plasma")
                 return
@@ -155,6 +158,8 @@ class PlasmaManager:
     def get(self, name, timeout_ms=0):
         object_id = plasma.ObjectID(hashlib.sha1(str.encode(name)).digest())
         while True:
+            if self.stop_event != None and self.stop_event.is_set():
+                return
             try:
                 return self.plasma_client.get(object_id, timeout_ms=timeout_ms)
             except:
@@ -164,6 +169,8 @@ class PlasmaManager:
     def put(self, name, obj):
         object_id = plasma.ObjectID(hashlib.sha1(str.encode(name)).digest())
         while True:
+            if self.stop_event != None and self.stop_event.is_set():
+                return
             try:
                 self.plasma_client.put(obj, object_id)
                 return
@@ -175,6 +182,8 @@ class PlasmaManager:
     def delete(self, name):
         object_id = plasma.ObjectID(hashlib.sha1(str.encode(name)).digest())
         while True:
+            if self.stop_event != None and self.stop_event.is_set():
+                return
             try:
                 self.plasma_client.delete([object_id])
                 return
```
```diff
@@ -12,6 +12,7 @@ import numpy as np
 import copy
 import itertools
 import json
+import base64
 from collections import defaultdict
 from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, PlasmaManager
 from frigate.objects import ObjectTracker
@@ -115,7 +116,7 @@ def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
     return process
 
 class CameraCapture(threading.Thread):
-    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame):
+    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame, stop_event):
         threading.Thread.__init__(self)
         self.name = name
         self.frame_shape = frame_shape
@@ -124,16 +125,21 @@ class CameraCapture(threading.Thread):
         self.take_frame = take_frame
         self.fps = fps
         self.skipped_fps = EventsPerSecond()
-        self.plasma_client = PlasmaManager()
+        self.plasma_client = PlasmaManager(stop_event)
         self.ffmpeg_process = ffmpeg_process
         self.current_frame = 0
         self.last_frame = 0
         self.detection_frame = detection_frame
+        self.stop_event = stop_event
 
     def run(self):
         frame_num = 0
         self.skipped_fps.start()
         while True:
+            if self.stop_event.is_set():
+                print(f"{self.name}: stop event set. exiting capture thread...")
+                break
+
             if self.ffmpeg_process.poll() != None:
                 print(f"{self.name}: ffmpeg process is not running. exiting capture thread...")
                 break
@@ -189,7 +195,12 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
 
     # load in the mask for object detection
     if 'mask' in config:
-        mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
+        if config['mask'].startswith('base64,'):
+            img = base64.b64decode(config['mask'][7:])
+            npimg = np.fromstring(img, dtype=np.uint8)
+            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
+        else:
+            mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
     else:
         mask = None
```
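To produce the `base64,`-prefixed mask value this new decoding path accepts, something along these lines works (a sketch; the bmp path is hypothetical):

```python
import base64

# encode an existing bmp mask for embedding in the config as 'base64,<data>'
with open("config/mask.bmp", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("ascii")
print(f"base64,{encoded}")
```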