Compare commits

28 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Blake Blackshear | 7686c510b3 | add a few more metrics to debug | 2020-02-23 18:11:39 -06:00 |
| Blake Blackshear | 2f5e322d3c | cleanup the plasma store when finished with a frame | 2020-02-23 18:11:08 -06:00 |
| Blake Blackshear | 1cd4c12104 | dont redirect stdout for plasma store | 2020-02-23 15:53:17 -06:00 |
| Blake Blackshear | 1a8b034685 | reset detection fps | 2020-02-23 15:53:00 -06:00 |
| Blake Blackshear | da6dc03a57 | dont change dictionary while iterating | 2020-02-23 11:18:00 -06:00 |
| Blake Blackshear | 7fa3b70d2d | allow specifying the frame size in the config instead of detecting | 2020-02-23 07:56:14 -06:00 |
| Blake Blackshear | 1fc5a2bfd4 | ensure missing objects are expired even when other object types are in the frame | 2020-02-23 07:55:51 -06:00 |
| Blake Blackshear | 7e84da7dad | Fix watchdog last_frame calculation | 2020-02-23 07:55:16 -06:00 |
| Blake Blackshear | 128be72e28 | cleanup | 2020-02-22 09:15:29 -06:00 |
| Blake Blackshear | aaddedc95c | update docs and add back benchmark | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | ba919fb439 | fix watchdog | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | b1d563f3c4 | check avg wait before dropping frames | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | 204d8af5df | fix watchdog restart | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | b507a73d79 | improve watchdog and coral fps tracking | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | 66eeb8b5cb | dont log http requests | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | efa67067c6 | cleanup | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | aeb036f1a4 | add models and convert speed to ms | 2020-02-22 09:10:37 -06:00 |
| Blake Blackshear | 74c528f9dc | add watchdog for camera processes | 2020-02-22 09:10:34 -06:00 |
| Blake Blackshear | f2d54bec43 | cleanup old code | 2020-02-22 09:09:36 -06:00 |
| Blake Blackshear | f07d57741e | add a min_fps option | 2020-02-22 09:06:46 -06:00 |
| Blake Blackshear | 2c1ec19f98 | check plasma store and consolidate frame drawing | 2020-02-22 09:06:46 -06:00 |
| Blake Blackshear | 6a9027c002 | split into separate processes | 2020-02-22 09:06:43 -06:00 |
| Blake Blackshear | 60c15e4419 | update tflite to 2.1.0 | 2020-02-22 09:05:26 -06:00 |
| Blake Blackshear | 03dbf600aa | refactor some classes into new files | 2020-02-22 09:05:26 -06:00 |
| Blake Blackshear | fbbb79b31b | tweak process handoff | 2020-02-22 09:05:26 -06:00 |
| Blake Blackshear | 496c6bc6c4 | Mostly working detection in a separate process | 2020-02-22 09:05:26 -06:00 |
| Blake Blackshear | 869a81c944 | read from ffmpeg | 2020-02-22 09:05:26 -06:00 |
| Blake Blackshear | 5b1884cfb3 | WIP: revamp to incorporate motion | 2020-02-22 09:05:26 -06:00 |
8 changed files with 135 additions and 224 deletions

View File

@@ -38,9 +38,9 @@ RUN apt -qq update && apt -qq install --no-install-recommends -y \
&& apt -qq install --no-install-recommends -y \
libedgetpu1-max \
## Tensorflow lite (python 3.7 only)
&& wget -q https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-linux_x86_64.whl \
&& python3.7 -m pip install tflite_runtime-2.1.0.post1-cp37-cp37m-linux_x86_64.whl \
&& rm tflite_runtime-2.1.0.post1-cp37-cp37m-linux_x86_64.whl \
&& wget -q https://dl.google.com/coral/python/tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
&& python3.7 -m pip install tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
&& rm tflite_runtime-2.1.0-cp37-cp37m-linux_x86_64.whl \
&& rm -rf /var/lib/apt/lists/* \
&& (apt-get autoremove -y; apt-get autoclean -y)
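
The hunk above swaps the pinned `tflite_runtime` wheel. A minimal sanity check (not part of the diff) that the installed runtime imports and can load the bundled CPU model; the `/cpu_model.tflite` path comes from the README section below:

```python
# Run inside the built container to verify the tflite_runtime wheel works.
import tflite_runtime.interpreter as tflite

interpreter = tflite.Interpreter(model_path='/cpu_model.tflite')
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['shape'])  # expect [1 300 300 3]
```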

View File

@@ -16,6 +16,16 @@ You see multiple bounding boxes because it draws bounding boxes from all frames
[![](http://img.youtube.com/vi/nqHbCtyo4dY/0.jpg)](http://www.youtube.com/watch?v=nqHbCtyo4dY "Frigate")
## Getting Started
Build the container with
```
docker build -t frigate .
```
Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use your own models with volume mounts:
- CPU Model: `/cpu_model.tflite`
- EdgeTPU Model: `/edgetpu_model.tflite`
- Labels: `/labelmap.txt`
Run the container with
```bash
docker run --rm \
@@ -26,7 +36,7 @@ docker run --rm \
-v /etc/localtime:/etc/localtime:ro \
-p 5000:5000 \
-e FRIGATE_RTSP_PASSWORD='password' \
blakeblackshear/frigate:stable
frigate:latest
```
Example docker-compose:
@@ -36,7 +46,7 @@ Example docker-compose:
restart: unless-stopped
privileged: true
shm_size: '1g' # should work for 5-7 cameras
image: blakeblackshear/frigate:stable
image: frigate:latest
volumes:
- /dev/bus/usb:/dev/bus/usb
- /etc/localtime:/etc/localtime:ro
@@ -117,11 +127,6 @@ sensor:
value_template: '{{ states.sensor.frigate_debug.attributes["coral"]["inference_speed"] }}'
unit_of_measurement: 'ms'
```
## Using a custom model
Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use your own models with volume mounts:
- CPU Model: `/cpu_model.tflite`
- EdgeTPU Model: `/edgetpu_model.tflite`
- Labels: `/labelmap.txt`
## Tips
- Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed
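
The custom-model bullets above mount a label map at `/labelmap.txt`. Frigate ships its own `load_labels` helper (referenced in the benchmark diff below); a hedged sketch of what such a loader does, assuming the common `<id> <label>` line format:

```python
# Hypothetical loader for a "<id> <label>" label map such as /labelmap.txt;
# frigate's actual load_labels in frigate/edgetpu.py may differ in detail.
def load_labels(path):
    labels = {}
    with open(path) as f:
        for line in f:
            idx, label = line.strip().split(maxsplit=1)
            labels[int(idx)] = label
    return labels

labels = load_labels('/labelmap.txt')  # e.g. labels[0] == 'person'
```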

View File

@@ -1,79 +1,18 @@
import os
from statistics import mean
import multiprocessing as mp
import statistics
import numpy as np
import datetime
from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
import time
from frigate.edgetpu import ObjectDetector
my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
labels = load_labels('/labelmap.txt')
object_detector = ObjectDetector()
######
# Minimal same process runner
######
# object_detector = ObjectDetector()
# tensor_input = np.expand_dims(np.full((300,300,3), 0, np.uint8), axis=0)
frame = np.zeros((300,300,3), np.uint8)
input_frame = np.expand_dims(frame, axis=0)
# start = datetime.datetime.now().timestamp()
detection_times = []
# frame_times = []
# for x in range(0, 1000):
# start_frame = datetime.datetime.now().timestamp()
for x in range(0, 100):
start = time.monotonic()
object_detector.detect_raw(input_frame)
detection_times.append(time.monotonic()-start)
# tensor_input[:] = my_frame
# detections = object_detector.detect_raw(tensor_input)
# parsed_detections = []
# for d in detections:
# if d[1] < 0.4:
# break
# parsed_detections.append((
# labels[int(d[0])],
# float(d[1]),
# (d[2], d[3], d[4], d[5])
# ))
# frame_times.append(datetime.datetime.now().timestamp()-start_frame)
# duration = datetime.datetime.now().timestamp()-start
# print(f"Processed for {duration:.2f} seconds.")
# print(f"Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######
# Separate process runner
######
def start(id, num_detections, detection_queue):
object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue)
start = datetime.datetime.now().timestamp()
frame_times = []
for x in range(0, num_detections):
start_frame = datetime.datetime.now().timestamp()
detections = object_detector.detect(my_frame)
frame_times.append(datetime.datetime.now().timestamp()-start_frame)
duration = datetime.datetime.now().timestamp()-start
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
edgetpu_process = EdgeTPUProcess()
# start(1, 1000, edgetpu_process.detect_lock, edgetpu_process.detect_ready, edgetpu_process.frame_ready)
####
# Multiple camera processes
####
camera_processes = []
for x in range(0, 10):
camera_process = mp.Process(target=start, args=(x, 100, edgetpu_process.detection_queue))
camera_process.daemon = True
camera_processes.append(camera_process)
start = datetime.datetime.now().timestamp()
for p in camera_processes:
p.start()
for p in camera_processes:
p.join()
duration = datetime.datetime.now().timestamp()-start
print(f"Total - Processed for {duration:.2f} seconds.")
print(f"Average inference time: {statistics.mean(detection_times)*1000:.2f}ms")

View File

@@ -3,13 +3,9 @@ web_port: 5000
mqtt:
host: mqtt.server.com
topic_prefix: frigate
# client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
# user: username # Optional
#################
## Environment variables that begin with 'FRIGATE_' may be referenced in {}.
## password: '{FRIGATE_MQTT_PASSWORD}'
#################
# password: password # Optional
# client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
# user: username # Optional -- Uncomment for use
# password: password # Optional -- Uncomment for use
#################
# Default ffmpeg args. Optional and can be overwritten per camera.
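
The `'{FRIGATE_MQTT_PASSWORD}'` placeholder above is expanded at startup with `str.format` over the `FRIGATE_`-prefixed environment variables, as the `detect_objects.py` hunk below shows. A minimal sketch of that substitution:

```python
import os

# mirror of the FRIGATE_VARS collection in the diff below
FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}

password = '{FRIGATE_MQTT_PASSWORD}'
# str.format raises KeyError if FRIGATE_MQTT_PASSWORD is not exported
password = password.format(**FRIGATE_VARS)
```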

View File

@@ -1,4 +1,3 @@
import os
import cv2
import time
import datetime
@@ -9,7 +8,7 @@ import multiprocessing as mp
import subprocess as sp
import numpy as np
import logging
from flask import Flask, Response, make_response, jsonify, request
from flask import Flask, Response, make_response, jsonify
import paho.mqtt.client as mqtt
from frigate.video import track_camera
@@ -17,8 +16,6 @@ from frigate.object_processing import TrackedObjectProcessor
from frigate.util import EventsPerSecond
from frigate.edgetpu import EdgeTPUProcess
FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
with open('/config/config.yml') as f:
CONFIG = yaml.safe_load(f)
@@ -27,8 +24,6 @@ MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
if not MQTT_PASS is None:
MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')
# Set the default FFmpeg config
@@ -36,7 +31,7 @@ FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
FFMPEG_DEFAULT_CONFIG = {
'global_args': FFMPEG_CONFIG.get('global_args',
['-hide_banner','-loglevel','panic']),
'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
[]),
'input_args': FFMPEG_CONFIG.get('input_args',
['-avoid_negative_ts', 'make_zero',
@@ -73,21 +68,27 @@ class CameraWatchdog(threading.Thread):
# wait a bit before checking
time.sleep(30)
if (self.tflite_process.detection_start.value > 0.0 and
datetime.datetime.now().timestamp() - self.tflite_process.detection_start.value > 10):
print("Detection appears to be stuck. Restarting detection process")
self.tflite_process.start_or_restart()
time.sleep(30)
for name, camera_process in self.camera_processes.items():
process = camera_process['process']
if (not self.object_processor.get_current_frame_time(name) is None and
(datetime.datetime.now().timestamp() - self.object_processor.get_current_frame_time(name)) > 30):
print(f"Last frame for {name} is more than 30 seconds old...")
if process.is_alive():
process.terminate()
print("Waiting for process to exit gracefully...")
process.join(timeout=30)
if process.exitcode is None:
print("Process didnt exit. Force killing...")
process.kill()
process.join()
if not process.is_alive():
print(f"Process for {name} is not alive. Starting again...")
camera_process['fps'].value = float(self.config[name]['fps'])
camera_process['skipped_fps'].value = 0.0
camera_process['detection_fps'].value = 0.0
self.object_processor.camera_data[name]['current_frame_time'] = None
process = mp.Process(target=track_camera, args=(name, self.config[name], FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
self.tflite_process.detection_queue, self.tracked_objects_queue,
self.tflite_process.detect_lock, self.tflite_process.detect_ready, self.tflite_process.frame_ready, self.tracked_objects_queue,
camera_process['fps'], camera_process['skipped_fps'], camera_process['detection_fps']))
process.daemon = True
camera_process['process'] = process
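
The terminate/join/kill sequence above is the same graceful-then-forceful pattern the code applies to both camera and detection processes. Factored out as a generic sketch with hypothetical names:

```python
import multiprocessing as mp

def stop_process(process: mp.Process, timeout: float = 30.0) -> None:
    """Ask a child process to exit, then force kill it if it ignores us."""
    if not process.is_alive():
        return
    process.terminate()            # graceful request (SIGTERM on Linux)
    process.join(timeout=timeout)  # wait for it to exit on its own
    if process.exitcode is None:   # still running after the timeout
        process.kill()             # SIGKILL, cannot be ignored
        process.join()
```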
@@ -149,7 +150,7 @@ def main():
'detection_fps': mp.Value('d', 0.0)
}
camera_process = mp.Process(target=track_camera, args=(name, config, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG,
tflite_process.detection_queue, tracked_objects_queue,
tflite_process.detect_lock, tflite_process.detect_ready, tflite_process.frame_ready, tracked_objects_queue,
camera_processes[name]['fps'], camera_processes[name]['skipped_fps'], camera_processes[name]['detection_fps']))
camera_process.daemon = True
camera_processes[name]['process'] = camera_process
@@ -183,16 +184,14 @@ def main():
for name, camera_stats in camera_processes.items():
total_detection_fps += camera_stats['detection_fps'].value
stats[name] = {
'fps': round(camera_stats['fps'].value, 2),
'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
'detection_fps': round(camera_stats['detection_fps'].value, 2)
'fps': camera_stats['fps'].value,
'skipped_fps': camera_stats['skipped_fps'].value,
'detection_fps': camera_stats['detection_fps'].value
}
stats['coral'] = {
'fps': round(total_detection_fps, 2),
'inference_speed': round(tflite_process.avg_inference_speed.value*1000, 2),
'detection_queue': tflite_process.detection_queue.qsize(),
'detection_start': tflite_process.detection_start.value
'fps': total_detection_fps,
'inference_speed': round(tflite_process.avg_inference_speed.value*1000, 2)
}
rc = plasma_process.poll()
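
The `inference_speed` stat above is an exponential moving average with alpha = 0.1 (`avg = (avg*9 + sample)/10` in the edgetpu.py hunk below), converted to milliseconds for reporting. As a standalone sketch:

```python
def update_avg(avg, sample, alpha=0.1):
    # equivalent to (avg * 9 + sample) / 10 when alpha is 0.1
    return avg * (1 - alpha) + sample * alpha

avg_inference_speed = 0.01  # seconds; the initial value used by EdgeTPUProcess
for duration in (0.012, 0.011, 0.013):
    avg_inference_speed = update_avg(avg_inference_speed, duration)
print(round(avg_inference_speed * 1000, 2))  # ms, as exposed in the stats endpoint
```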
@@ -218,26 +217,21 @@ def main():
@app.route('/<camera_name>')
def mjpeg_feed(camera_name):
fps = int(request.args.get('fps', '3'))
height = int(request.args.get('h', '360'))
if camera_name in CONFIG['cameras']:
# return a multipart response
return Response(imagestream(camera_name, fps, height),
return Response(imagestream(camera_name),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return "Camera named {} not found".format(camera_name), 404
def imagestream(camera_name, fps, height):
def imagestream(camera_name):
while True:
# max out at specified FPS
time.sleep(1/fps)
# max out at 1 FPS
time.sleep(1)
frame = object_processor.get_current_frame(camera_name)
if frame is None:
frame = np.zeros((height,int(height*16/9),3), np.uint8)
frame = cv2.resize(frame, dsize=(int(height*16/9), height), interpolation=cv2.INTER_LINEAR)
frame = np.zeros((720,1280,3), np.uint8)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
ret, jpg = cv2.imencode('.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
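
Each part of the multipart response above is a complete JPEG between `--frame` boundaries. A hedged client-side sketch that pulls a single frame from the feed; the host, port, and camera name here are illustrative, not taken from the diff:

```python
import requests

# grab one JPEG from the MJPEG stream by scanning for JPEG start/end markers
resp = requests.get('http://localhost:5000/back', stream=True)
buf = b''
for chunk in resp.iter_content(chunk_size=4096):
    buf += chunk
    soi, eoi = buf.find(b'\xff\xd8'), buf.find(b'\xff\xd9')
    if soi != -1 and eoi != -1:
        with open('frame.jpg', 'wb') as f:
            f.write(buf[soi:eoi + 2])  # one complete JPEG frame
        break
resp.close()
```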

View File

@@ -1,10 +1,8 @@
import os
import datetime
import hashlib
import multiprocessing as mp
import numpy as np
import SharedArray as sa
import pyarrow.plasma as plasma
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond
@@ -62,81 +60,77 @@ class ObjectDetector():
return detections
def run_detector(detection_queue, avg_speed, start):
print(f"Starting detection process: {os.getpid()}")
plasma_client = plasma.connect("/tmp/plasma")
object_detector = ObjectDetector()
while True:
object_id_str = detection_queue.get()
object_id_hash = hashlib.sha1(str.encode(object_id_str))
object_id = plasma.ObjectID(object_id_hash.digest())
object_id_out = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{object_id_str}")).digest())
input_frame = plasma_client.get(object_id, timeout_ms=0)
if input_frame is plasma.ObjectNotAvailable:
continue
# detect and put the output in the plasma store
start.value = datetime.datetime.now().timestamp()
plasma_client.put(object_detector.detect_raw(input_frame), object_id_out)
duration = datetime.datetime.now().timestamp()-start.value
start.value = 0.0
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
def __init__(self):
self.detection_queue = mp.Queue()
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.detect_process = None
self.start_or_restart()
# TODO: see if we can use the plasma store with a queue and maintain the same speeds
try:
sa.delete("frame")
except:
pass
try:
sa.delete("detections")
except:
pass
def start_or_restart(self):
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.detect_process.terminate()
print("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
if self.detect_process.exitcode is None:
print("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.avg_inference_speed, self.detection_start))
self.input_frame = sa.create("frame", shape=(1,300,300,3), dtype=np.uint8)
self.detections = sa.create("detections", shape=(20,6), dtype=np.float32)
self.detect_lock = mp.Lock()
self.detect_ready = mp.Event()
self.frame_ready = mp.Event()
self.avg_inference_speed = mp.Value('d', 0.01)
def run_detector(detect_ready, frame_ready, avg_speed):
print(f"Starting detection process: {os.getpid()}")
object_detector = ObjectDetector()
input_frame = sa.attach("frame")
detections = sa.attach("detections")
while True:
# wait until a frame is ready
frame_ready.wait()
start = datetime.datetime.now().timestamp()
# signal that the process is busy
frame_ready.clear()
detections[:] = object_detector.detect_raw(input_frame)
# signal that the process is ready to detect
detect_ready.set()
duration = datetime.datetime.now().timestamp()-start
avg_speed.value = (avg_speed.value*9 + duration)/10
self.detect_process = mp.Process(target=run_detector, args=(self.detect_ready, self.frame_ready, self.avg_inference_speed))
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
def __init__(self, name, labels, detection_queue):
def __init__(self, labels, detect_lock, detect_ready, frame_ready):
self.labels = load_labels(labels)
self.name = name
self.input_frame = sa.attach("frame")
self.detections = sa.attach("detections")
self.fps = EventsPerSecond()
self.plasma_client = plasma.connect("/tmp/plasma")
self.detection_queue = detection_queue
self.detect_lock = detect_lock
self.detect_ready = detect_ready
self.frame_ready = frame_ready
def detect(self, tensor_input, threshold=.4):
detections = []
now = f"{self.name}-{str(datetime.datetime.now().timestamp())}"
object_id_frame = plasma.ObjectID(hashlib.sha1(str.encode(now)).digest())
object_id_detections = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{now}")).digest())
self.plasma_client.put(tensor_input, object_id_frame)
self.detection_queue.put(now)
raw_detections = self.plasma_client.get(object_id_detections, timeout_ms=10000)
if raw_detections is plasma.ObjectNotAvailable:
self.plasma_client.delete([object_id_frame])
return detections
for d in raw_detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
self.plasma_client.delete([object_id_frame, object_id_detections])
with self.detect_lock:
self.input_frame[:] = tensor_input
# unset detections and signal that a frame is ready
self.detect_ready.clear()
self.frame_ready.set()
# wait until the detection process is finished,
self.detect_ready.wait()
for d in self.detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
self.fps.update()
return detections
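
The plasma-store side of this diff hands frames between processes by key: both sides derive the same 20-byte `ObjectID` from a SHA-1 of a per-frame string, the producer `put`s the tensor, the detector `get`s it and `put`s the raw detections under an `out-` prefixed key, and the producer deletes both objects when done. A condensed single-process sketch of that round trip (in Frigate the middle step runs in the detection process, and the detections array is a real model output rather than the stand-in zeros used here):

```python
import datetime
import hashlib

import numpy as np
import pyarrow.plasma as plasma

plasma_client = plasma.connect("/tmp/plasma")  # store must already be running

# deterministic ObjectIDs: sha1 digests are exactly the 20 bytes plasma wants
key = f"back-{datetime.datetime.now().timestamp()}"
frame_id = plasma.ObjectID(hashlib.sha1(key.encode()).digest())
out_id = plasma.ObjectID(hashlib.sha1(f"out-{key}".encode()).digest())

# producer: publish the tensor input under the frame key
plasma_client.put(np.zeros((1, 300, 300, 3), np.uint8), frame_id)

# detector: fetch the frame, run inference, publish the raw detections
tensor = plasma_client.get(frame_id, timeout_ms=0)
if tensor is not plasma.ObjectNotAvailable:
    plasma_client.put(np.zeros((20, 6), np.float32), out_id)  # stand-in result

# producer: block up to 10s for the result, then free both objects
detections = plasma_client.get(out_id, timeout_ms=10000)
plasma_client.delete([frame_id, out_id])
```

Note that the `if d[1] < threshold: break` loop in both variants assumes the detections arrive sorted by descending score, which the SSD model's postprocessed output provides.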

View File

@@ -34,6 +34,7 @@ class TrackedObjectProcessor(threading.Thread):
'best_objects': {},
'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
'tracked_objects': {},
'current_frame_time': None,
'current_frame': np.zeros((720,1280,3), np.uint8),
'object_id': None
})
@@ -46,6 +47,9 @@ class TrackedObjectProcessor(threading.Thread):
def get_current_frame(self, camera):
return self.camera_data[camera]['current_frame']
def get_current_frame_time(self, camera):
return self.camera_data[camera]['current_frame_time']
def run(self):
while True:
@@ -89,6 +93,7 @@ class TrackedObjectProcessor(threading.Thread):
# Set the current frame as ready
###
self.camera_data[camera]['current_frame'] = current_frame
self.camera_data[camera]['current_frame_time'] = frame_time
# store the object id, so you can delete it at the next loop
previous_object_id = self.camera_data[camera]['object_id']
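
The nested `defaultdict` above gives each camera a lazily created state dict, now including the `current_frame_time` field the watchdog reads through `get_current_frame_time`. The idiom in isolation:

```python
from collections import defaultdict

import numpy as np

# per-camera state appears on first access, no registration step needed
camera_data = defaultdict(lambda: {
    'best_objects': {},
    'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
    'tracked_objects': {},
    'current_frame_time': None,
    'current_frame': np.zeros((720, 1280, 3), np.uint8),
    'object_id': None,
})

print(camera_data['back']['object_status']['person'])  # 'OFF', created on demand
```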

View File

@@ -98,23 +98,7 @@ def create_tensor_input(frame, region):
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
return np.expand_dims(cropped_frame, axis=0)
def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
if not ffmpeg_process is None:
print("Terminating the existing ffmpeg process...")
ffmpeg_process.terminate()
try:
print("Waiting for ffmpeg to exit gracefully...")
ffmpeg_process.wait(timeout=30)
except sp.TimeoutExpired:
print("FFmpeg didnt exit. Force killing...")
ffmpeg_process.kill()
ffmpeg_process.wait()
print("Creating ffmpeg process...")
print(" ".join(ffmpeg_cmd))
return sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=frame_size*10)
def track_camera(name, config, ffmpeg_global_config, global_objects_config, detection_queue, detected_objects_queue, fps, skipped_fps, detection_fps):
def track_camera(name, config, ffmpeg_global_config, global_objects_config, detect_lock, detect_ready, frame_ready, detected_objects_queue, fps, skipped_fps, detection_fps):
print(f"Starting process for {name}: {os.getpid()}")
# Merge the ffmpeg config with the global config
@@ -124,13 +108,6 @@ def track_camera(name, config, ffmpeg_global_config, global_objects_config, dete
ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', ffmpeg_global_config['hwaccel_args'])
ffmpeg_input_args = ffmpeg.get('input_args', ffmpeg_global_config['input_args'])
ffmpeg_output_args = ffmpeg.get('output_args', ffmpeg_global_config['output_args'])
ffmpeg_cmd = (['ffmpeg'] +
ffmpeg_global_args +
ffmpeg_hwaccel_args +
ffmpeg_input_args +
['-i', ffmpeg_input] +
ffmpeg_output_args +
['pipe:'])
# Merge the tracked object config with the global config
camera_objects_config = config.get('objects', {})
@@ -172,11 +149,21 @@ def track_camera(name, config, ffmpeg_global_config, global_objects_config, dete
mask[:] = 255
motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue)
object_detector = RemoteObjectDetector('/labelmap.txt', detect_lock, detect_ready, frame_ready)
object_tracker = ObjectTracker(10)
ffmpeg_cmd = (['ffmpeg'] +
ffmpeg_global_args +
ffmpeg_hwaccel_args +
ffmpeg_input_args +
['-i', ffmpeg_input] +
ffmpeg_output_args +
['pipe:'])
print(" ".join(ffmpeg_cmd))
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
ffmpeg_process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=frame_size)
plasma_client = plasma.connect("/tmp/plasma")
frame_num = 0
@@ -193,14 +180,7 @@ def track_camera(name, config, ffmpeg_global_config, global_objects_config, dete
avg_wait = (avg_wait*99+duration)/100
if not frame_bytes:
rc = ffmpeg_process.poll()
if rc is not None:
print(f"{name}: ffmpeg_process exited unexpectedly with {rc}")
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process)
time.sleep(10)
else:
print(f"{name}: ffmpeg_process is still running but didnt return any bytes")
continue
break
# limit frame rate
frame_num += 1
@@ -373,5 +353,3 @@ def track_camera(name, config, ffmpeg_global_config, global_objects_config, dete
plasma_client.put(frame, plasma.ObjectID(object_id))
# add to the queue
detected_objects_queue.put((name, frame_time, object_tracker.tracked_objects))
print(f"{name}: exiting subprocess")