Compare commits
157 Commits
v0.7.0-rc1
...
v0.8.0-bet
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e05b27b8dc | ||
|
|
7111bd208e | ||
|
|
04a80280da | ||
|
|
3bda092140 | ||
|
|
9086820479 | ||
|
|
d1da57aedc | ||
|
|
6ded12c566 | ||
|
|
70352566a7 | ||
|
|
cf5cc86588 | ||
|
|
e41db49ab8 | ||
|
|
1b7effafee | ||
|
|
69e9e0b0bf | ||
|
|
89624df411 | ||
|
|
d1a7405211 | ||
|
|
040f8c7c20 | ||
|
|
6d7acabf4c | ||
|
|
45a8b42157 | ||
|
|
8785be24b7 | ||
|
|
cc0812540c | ||
|
|
5cf38ca4f7 | ||
|
|
7e4395c30e | ||
|
|
598d3aeda2 | ||
|
|
012dbf81f7 | ||
|
|
f869def12e | ||
|
|
31f7666337 | ||
|
|
9e339acbca | ||
|
|
8f8054a299 | ||
|
|
f7021eec4c | ||
|
|
c124153da4 | ||
|
|
706c2f921e | ||
|
|
de1d66bcb9 | ||
|
|
4502ca8e80 | ||
|
|
32a66fe5e8 | ||
|
|
e1251aafdb | ||
|
|
587494068c | ||
|
|
7a4d90a47a | ||
|
|
d06b587d33 | ||
|
|
eef70e434b | ||
|
|
b39da3ee01 | ||
|
|
e07c4e0d8c | ||
|
|
2f41ba6f77 | ||
|
|
bf95af0f22 | ||
|
|
2e15847f86 | ||
|
|
5992e85dc8 | ||
|
|
24d416b869 | ||
|
|
5dbf368c4b | ||
|
|
7d56fe105f | ||
|
|
e9327aa18c | ||
|
|
df56e079de | ||
|
|
8c5bfbd187 | ||
|
|
2613e74f97 | ||
|
|
9a7fb96357 | ||
|
|
37f9dfed92 | ||
|
|
68c1544808 | ||
|
|
2b3d3c5824 | ||
|
|
efea87a3ea | ||
|
|
977785fb10 | ||
|
|
4e113e62c0 | ||
|
|
5080b2d781 | ||
|
|
5cfd6d1edb | ||
|
|
27ae4d8ab0 | ||
|
|
3db33302ec | ||
|
|
f2910d48e0 | ||
|
|
cf0f8892e2 | ||
|
|
4d22e172ff | ||
|
|
8874a55b0f | ||
|
|
24b703a875 | ||
|
|
8b8f5b5c40 | ||
|
|
eac81136d2 | ||
|
|
d1e27b43ea | ||
|
|
105dcb7094 | ||
|
|
c0a16efdc1 | ||
|
|
2800c54743 | ||
|
|
2a24e8abcb | ||
|
|
37ee746ebb | ||
|
|
7ee6bfe855 | ||
|
|
40f57a8754 | ||
|
|
e0da462223 | ||
|
|
47a9fc4292 | ||
|
|
03fe5158db | ||
|
|
72be6b480d | ||
|
|
a8964dcc1f | ||
|
|
732e91ee42 | ||
|
|
27da080ce6 | ||
|
|
075d06b108 | ||
|
|
95dc17ffcd | ||
|
|
408b53f8b4 | ||
|
|
3ef68a297a | ||
|
|
3e9b3711dc | ||
|
|
a1cc9ad1f0 | ||
|
|
29e8aa4020 | ||
|
|
777aff403f | ||
|
|
4b3b702459 | ||
|
|
893e6b40a7 | ||
|
|
a85d780020 | ||
|
|
34439699ae | ||
|
|
64b63142b1 | ||
|
|
cee1ab000b | ||
|
|
3ff98770c1 | ||
|
|
244203463d | ||
|
|
b6f7940b10 | ||
|
|
75312602aa | ||
|
|
75977128f0 | ||
|
|
eafde6c677 | ||
|
|
da0598baef | ||
|
|
35ba5e2f7c | ||
|
|
49258d6dbe | ||
|
|
5a081e4f00 | ||
|
|
4feae472e9 | ||
|
|
4e83239258 | ||
|
|
c4cccf44a5 | ||
|
|
64e7cbcc62 | ||
|
|
dd86e4f317 | ||
|
|
4db285a875 | ||
|
|
939d1ba091 | ||
|
|
0fe8d486d9 | ||
|
|
a3cb02af5c | ||
|
|
45a6b8452c | ||
|
|
9d594cc640 | ||
|
|
59e41ae1ac | ||
|
|
c6ed16465b | ||
|
|
8f14b36f5a | ||
|
|
b6c2491e3b | ||
|
|
8e31d04d90 | ||
|
|
bf93fbb357 | ||
|
|
c064b244db | ||
|
|
0280610e96 | ||
|
|
4363623c45 | ||
|
|
c960914ec3 | ||
|
|
9ecc80b443 | ||
|
|
3e146de0a2 | ||
|
|
bee54c39dc | ||
|
|
623d138d60 | ||
|
|
76befc1249 | ||
|
|
51251b9fb0 | ||
|
|
8c45076bb6 | ||
|
|
7d683ef399 | ||
|
|
e4da3822b1 | ||
|
|
12c4cd77c5 | ||
|
|
a611cbb942 | ||
|
|
f946813ccb | ||
|
|
49fca1b839 | ||
|
|
54cb4a2180 | ||
|
|
9954e3b11e | ||
|
|
82692b0ddc | ||
|
|
9d4fdec12f | ||
|
|
ed72c995ef | ||
|
|
66c77d1157 | ||
|
|
40c322ad47 | ||
|
|
83f1e0d713 | ||
|
|
2d89044bd3 | ||
|
|
dc4d24c2b9 | ||
|
|
d5fb20c524 | ||
|
|
7e92e8bfe8 | ||
|
|
efdcfcef97 | ||
|
|
574ee2a46f | ||
|
|
ec4d048905 |
@@ -1,5 +1,5 @@
|
|||||||
README.md
|
README.md
|
||||||
diagram.png
|
docs/
|
||||||
.gitignore
|
.gitignore
|
||||||
debug
|
debug
|
||||||
config/
|
config/
|
||||||
|
|||||||
4
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -14,8 +14,8 @@ A clear and concise description of what the bug is.
|
|||||||
What version are you using?
|
What version are you using?
|
||||||
|
|
||||||
**Config file**
|
**Config file**
|
||||||
Include your full config file wrapped in back ticks.
|
Include your full config file wrapped in triple back ticks.
|
||||||
```
|
```yaml
|
||||||
config here
|
config here
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
46
Makefile
@@ -4,7 +4,7 @@ amd64_wheels:
|
|||||||
docker build --tag blakeblackshear/frigate-wheels:amd64 --file docker/Dockerfile.wheels .
|
docker build --tag blakeblackshear/frigate-wheels:amd64 --file docker/Dockerfile.wheels .
|
||||||
|
|
||||||
amd64_ffmpeg:
|
amd64_ffmpeg:
|
||||||
docker build --tag blakeblackshear/frigate-ffmpeg:amd64 --file docker/Dockerfile.ffmpeg.amd64 .
|
docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-amd64 --file docker/Dockerfile.ffmpeg.amd64 .
|
||||||
|
|
||||||
amd64_frigate:
|
amd64_frigate:
|
||||||
docker build --tag frigate-base --build-arg ARCH=amd64 --file docker/Dockerfile.base .
|
docker build --tag frigate-base --build-arg ARCH=amd64 --file docker/Dockerfile.base .
|
||||||
@@ -12,26 +12,38 @@ amd64_frigate:
|
|||||||
|
|
||||||
amd64_all: amd64_wheels amd64_ffmpeg amd64_frigate
|
amd64_all: amd64_wheels amd64_ffmpeg amd64_frigate
|
||||||
|
|
||||||
arm64_wheels:
|
amd64nvidia_wheels:
|
||||||
docker build --tag blakeblackshear/frigate-wheels:arm64 --file docker/Dockerfile.wheels.arm64 .
|
docker build --tag blakeblackshear/frigate-wheels:amd64nvidia --file docker/Dockerfile.wheels .
|
||||||
|
|
||||||
arm64_ffmpeg:
|
amd64nvidia_ffmpeg:
|
||||||
docker build --tag blakeblackshear/frigate-ffmpeg:arm64 --file docker/Dockerfile.ffmpeg.arm64 .
|
docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-amd64nvidia --file docker/Dockerfile.ffmpeg.amd64nvidia .
|
||||||
|
|
||||||
arm64_frigate:
|
amd64nvidia_frigate:
|
||||||
docker build --tag frigate-base --build-arg ARCH=arm64 --file docker/Dockerfile.base .
|
docker build --tag frigate-base --build-arg ARCH=amd64nvidia --file docker/Dockerfile.base .
|
||||||
docker build --tag frigate --file docker/Dockerfile.arm64 .
|
docker build --tag frigate --file docker/Dockerfile.amd64nvidia .
|
||||||
|
|
||||||
armv7hf_all: arm64_wheels arm64_ffmpeg arm64_frigate
|
amd64nvidia_all: amd64nvidia_wheels amd64nvidia_ffmpeg amd64nvidia_frigate
|
||||||
|
|
||||||
armv7hf_wheels:
|
aarch64_wheels:
|
||||||
docker build --tag blakeblackshear/frigate-wheels:armv7hf --file docker/Dockerfile.wheels .
|
docker build --tag blakeblackshear/frigate-wheels:aarch64 --file docker/Dockerfile.wheels.aarch64 .
|
||||||
|
|
||||||
armv7hf_ffmpeg:
|
aarch64_ffmpeg:
|
||||||
docker build --tag blakeblackshear/frigate-ffmpeg:armv7hf --file docker/Dockerfile.ffmpeg.armv7hf .
|
docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 .
|
||||||
|
|
||||||
armv7hf_frigate:
|
aarch64_frigate:
|
||||||
docker build --tag frigate-base --build-arg ARCH=armv7hf --file docker/Dockerfile.base .
|
docker build --tag frigate-base --build-arg ARCH=aarch64 --file docker/Dockerfile.base .
|
||||||
docker build --tag frigate --file docker/Dockerfile.armv7hf .
|
docker build --tag frigate --file docker/Dockerfile.aarch64 .
|
||||||
|
|
||||||
armv7hf_all: armv7hf_wheels armv7hf_ffmpeg armv7hf_frigate
|
armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate
|
||||||
|
|
||||||
|
armv7_wheels:
|
||||||
|
docker build --tag blakeblackshear/frigate-wheels:armv7 --file docker/Dockerfile.wheels .
|
||||||
|
|
||||||
|
armv7_ffmpeg:
|
||||||
|
docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-armv7 --file docker/Dockerfile.ffmpeg.armv7 .
|
||||||
|
|
||||||
|
armv7_frigate:
|
||||||
|
docker build --tag frigate-base --build-arg ARCH=armv7 --file docker/Dockerfile.base .
|
||||||
|
docker build --tag frigate --file docker/Dockerfile.armv7 .
|
||||||
|
|
||||||
|
armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 1.8 MiB |
@@ -1,231 +0,0 @@
|
|||||||
web_port: 5000
|
|
||||||
|
|
||||||
################
|
|
||||||
## List of detectors.
|
|
||||||
## Currently supported types: cpu, edgetpu
|
|
||||||
## EdgeTPU requires device as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api
|
|
||||||
################
|
|
||||||
detectors:
|
|
||||||
coral:
|
|
||||||
type: edgetpu
|
|
||||||
device: usb
|
|
||||||
|
|
||||||
mqtt:
|
|
||||||
host: mqtt.server.com
|
|
||||||
topic_prefix: frigate
|
|
||||||
# client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
|
|
||||||
# user: username # Optional
|
|
||||||
#################
|
|
||||||
## Environment variables that begin with 'FRIGATE_' may be referenced in {}.
|
|
||||||
## password: '{FRIGATE_MQTT_PASSWORD}'
|
|
||||||
#################
|
|
||||||
# password: password # Optional
|
|
||||||
|
|
||||||
################
|
|
||||||
# Global configuration for saving clips
|
|
||||||
################
|
|
||||||
save_clips:
|
|
||||||
###########
|
|
||||||
# Maximum length of time to retain video during long events.
|
|
||||||
# If an object is being tracked for longer than this amount of time, the cache
|
|
||||||
# will begin to expire and the resulting clip will be the last x seconds of the event.
|
|
||||||
###########
|
|
||||||
max_seconds: 300
|
|
||||||
|
|
||||||
#################
|
|
||||||
# Default ffmpeg args. Optional and can be overwritten per camera.
|
|
||||||
# Should work with most RTSP cameras that send h264 video
|
|
||||||
# Built from the properties below with:
|
|
||||||
# "ffmpeg" + global_args + input_args + "-i" + input + output_args
|
|
||||||
#################
|
|
||||||
# ffmpeg:
|
|
||||||
# global_args:
|
|
||||||
# - -hide_banner
|
|
||||||
# - -loglevel
|
|
||||||
# - panic
|
|
||||||
# hwaccel_args: []
|
|
||||||
# input_args:
|
|
||||||
# - -avoid_negative_ts
|
|
||||||
# - make_zero
|
|
||||||
# - -fflags
|
|
||||||
# - nobuffer
|
|
||||||
# - -flags
|
|
||||||
# - low_delay
|
|
||||||
# - -strict
|
|
||||||
# - experimental
|
|
||||||
# - -fflags
|
|
||||||
# - +genpts+discardcorrupt
|
|
||||||
# - -vsync
|
|
||||||
# - drop
|
|
||||||
# - -rtsp_transport
|
|
||||||
# - tcp
|
|
||||||
# - -stimeout
|
|
||||||
# - '5000000'
|
|
||||||
# - -use_wallclock_as_timestamps
|
|
||||||
# - '1'
|
|
||||||
# output_args:
|
|
||||||
# - -f
|
|
||||||
# - rawvideo
|
|
||||||
# - -pix_fmt
|
|
||||||
# - yuv420p
|
|
||||||
|
|
||||||
####################
|
|
||||||
# Global object configuration. Applies to all cameras
|
|
||||||
# unless overridden at the camera levels.
|
|
||||||
# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
|
|
||||||
# All labels from the model are reported over MQTT. These values are used to filter out false positives.
|
|
||||||
# min_area (optional): minimum width*height of the bounding box for the detected object
|
|
||||||
# max_area (optional): maximum width*height of the bounding box for the detected object
|
|
||||||
# min_score (optional): minimum score for the object to initiate tracking
|
|
||||||
# threshold (optional): The minimum decimal percentage for tracked object's computed score to considered a true positive
|
|
||||||
####################
|
|
||||||
objects:
|
|
||||||
track:
|
|
||||||
- person
|
|
||||||
filters:
|
|
||||||
person:
|
|
||||||
min_area: 5000
|
|
||||||
max_area: 100000
|
|
||||||
min_score: 0.5
|
|
||||||
threshold: 0.85
|
|
||||||
|
|
||||||
cameras:
|
|
||||||
back:
|
|
||||||
ffmpeg:
|
|
||||||
################
|
|
||||||
# Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
|
|
||||||
# Environment variables that begin with 'FRIGATE_' may be referenced in {}
|
|
||||||
################
|
|
||||||
input: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
|
||||||
#################
|
|
||||||
# These values will override default values for just this camera
|
|
||||||
#################
|
|
||||||
# global_args: []
|
|
||||||
# hwaccel_args: []
|
|
||||||
# input_args: []
|
|
||||||
# output_args: []
|
|
||||||
|
|
||||||
################
|
|
||||||
## Optionally specify the resolution of the video feed. Frigate will try to auto detect if not specified
|
|
||||||
################
|
|
||||||
# height: 1280
|
|
||||||
# width: 720
|
|
||||||
|
|
||||||
################
|
|
||||||
## Specify the framerate of your camera
|
|
||||||
##
|
|
||||||
## NOTE: This should only be set in the event ffmpeg is unable to determine your camera's framerate
|
|
||||||
## on its own and the reported framerate for your camera in frigate is well over what is expected.
|
|
||||||
################
|
|
||||||
# fps: 5
|
|
||||||
|
|
||||||
################
|
|
||||||
## Optional mask. Must be the same aspect ratio as your video feed. Value is any of the following:
|
|
||||||
## - name of a file in the config directory
|
|
||||||
## - base64 encoded image prefixed with 'base64,' eg. 'base64,asfasdfasdf....'
|
|
||||||
## - polygon of x,y coordinates prefixed with 'poly,' eg. 'poly,0,900,1080,900,1080,1920,0,1920'
|
|
||||||
##
|
|
||||||
## The mask works by looking at the bottom center of the bounding box for the detected
|
|
||||||
## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
|
|
||||||
## false positive. In my mask, the grass and driveway visible from my backdoor camera
|
|
||||||
## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
|
|
||||||
## person to stand) are black.
|
|
||||||
##
|
|
||||||
## Masked areas are also ignored for motion detection.
|
|
||||||
################
|
|
||||||
# mask: back-mask.bmp
|
|
||||||
|
|
||||||
################
|
|
||||||
# Allows you to limit the framerate within frigate for cameras that do not support
|
|
||||||
# custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
|
|
||||||
# 3 every 3rd frame, etc.
|
|
||||||
################
|
|
||||||
take_frame: 1
|
|
||||||
|
|
||||||
################
|
|
||||||
# The number of seconds to retain the highest scoring image for the best.jpg endpoint before allowing it
|
|
||||||
# to be replaced by a newer image. Defaults to 60 seconds.
|
|
||||||
################
|
|
||||||
best_image_timeout: 60
|
|
||||||
|
|
||||||
################
|
|
||||||
# MQTT settings
|
|
||||||
################
|
|
||||||
# mqtt:
|
|
||||||
# crop_to_region: True
|
|
||||||
# snapshot_height: 300
|
|
||||||
|
|
||||||
################
|
|
||||||
# Zones
|
|
||||||
################
|
|
||||||
zones:
|
|
||||||
#################
|
|
||||||
# Name of the zone
|
|
||||||
################
|
|
||||||
front_steps:
|
|
||||||
####################
|
|
||||||
# A list of x,y coordinates to define the polygon of the zone. The top
|
|
||||||
# left corner is 0,0. Can also be a comma separated string of all x,y coordinates combined.
|
|
||||||
# The same zone name can exist across multiple cameras if they have overlapping FOVs.
|
|
||||||
# An object is determined to be in the zone based on whether or not the bottom center
|
|
||||||
# of it's bounding box is within the polygon. The polygon must have at least 3 points.
|
|
||||||
# Coordinates can be generated at https://www.image-map.net/
|
|
||||||
####################
|
|
||||||
coordinates:
|
|
||||||
- 545,1077
|
|
||||||
- 747,939
|
|
||||||
- 788,805
|
|
||||||
################
|
|
||||||
# Zone level object filters. These are applied in addition to the global and camera filters
|
|
||||||
# and should be more restrictive than the global and camera filters. The global and camera
|
|
||||||
# filters are applied upstream.
|
|
||||||
################
|
|
||||||
filters:
|
|
||||||
person:
|
|
||||||
min_area: 5000
|
|
||||||
max_area: 100000
|
|
||||||
threshold: 0.8
|
|
||||||
|
|
||||||
################
|
|
||||||
# This will save a clip for each tracked object by frigate along with a json file that contains
|
|
||||||
# data related to the tracked object. This works by telling ffmpeg to write video segments to /cache
|
|
||||||
# from the video stream without re-encoding. Clips are then created by using ffmpeg to merge segments
|
|
||||||
# without re-encoding. The segments saved are unaltered from what frigate receives to avoid re-encoding.
|
|
||||||
# They do not contain bounding boxes. These are optimized to capture "false_positive" examples for improving frigate.
|
|
||||||
#
|
|
||||||
# NOTE: This feature does not work if you have "-vsync drop" configured in your input params.
|
|
||||||
# This will only work for camera feeds that can be copied into the mp4 container format without
|
|
||||||
# encoding such as h264. It may not work for some types of streams.
|
|
||||||
################
|
|
||||||
save_clips:
|
|
||||||
enabled: False
|
|
||||||
#########
|
|
||||||
# Number of seconds before the event to include in the clips
|
|
||||||
#########
|
|
||||||
pre_capture: 30
|
|
||||||
#########
|
|
||||||
# Objects to save clips for. Defaults to all tracked object types.
|
|
||||||
#########
|
|
||||||
# objects:
|
|
||||||
# - person
|
|
||||||
|
|
||||||
################
|
|
||||||
# Configuration for the snapshots in the debug view and mqtt
|
|
||||||
################
|
|
||||||
snapshots:
|
|
||||||
show_timestamp: True
|
|
||||||
draw_zones: False
|
|
||||||
|
|
||||||
################
|
|
||||||
# Camera level object config. If defined, this is used instead of the global config.
|
|
||||||
################
|
|
||||||
objects:
|
|
||||||
track:
|
|
||||||
- person
|
|
||||||
- car
|
|
||||||
filters:
|
|
||||||
person:
|
|
||||||
min_area: 5000
|
|
||||||
max_area: 100000
|
|
||||||
min_score: 0.5
|
|
||||||
threshold: 0.85
|
|
||||||
@@ -1,462 +0,0 @@
|
|||||||
import faulthandler; faulthandler.enable()
|
|
||||||
import os
|
|
||||||
import signal
|
|
||||||
import sys
|
|
||||||
import traceback
|
|
||||||
import signal
|
|
||||||
import cv2
|
|
||||||
import time
|
|
||||||
import datetime
|
|
||||||
import queue
|
|
||||||
import yaml
|
|
||||||
import threading
|
|
||||||
import multiprocessing as mp
|
|
||||||
import subprocess as sp
|
|
||||||
import numpy as np
|
|
||||||
import logging
|
|
||||||
from flask import Flask, Response, make_response, jsonify, request
|
|
||||||
import paho.mqtt.client as mqtt
|
|
||||||
|
|
||||||
from frigate.video import track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
|
|
||||||
from frigate.object_processing import TrackedObjectProcessor
|
|
||||||
from frigate.events import EventProcessor
|
|
||||||
from frigate.util import EventsPerSecond
|
|
||||||
from frigate.edgetpu import EdgeTPUProcess
|
|
||||||
|
|
||||||
FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
|
|
||||||
|
|
||||||
with open('/config/config.yml') as f:
|
|
||||||
CONFIG = yaml.safe_load(f)
|
|
||||||
|
|
||||||
MQTT_HOST = CONFIG['mqtt']['host']
|
|
||||||
MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
|
|
||||||
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
|
|
||||||
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
|
|
||||||
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
|
|
||||||
if not MQTT_PASS is None:
|
|
||||||
MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
|
|
||||||
MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')
|
|
||||||
|
|
||||||
# Set the default FFmpeg config
|
|
||||||
FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
|
|
||||||
FFMPEG_DEFAULT_CONFIG = {
|
|
||||||
'global_args': FFMPEG_CONFIG.get('global_args',
|
|
||||||
['-hide_banner','-loglevel','panic']),
|
|
||||||
'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
|
|
||||||
[]),
|
|
||||||
'input_args': FFMPEG_CONFIG.get('input_args',
|
|
||||||
['-avoid_negative_ts', 'make_zero',
|
|
||||||
'-fflags', 'nobuffer',
|
|
||||||
'-flags', 'low_delay',
|
|
||||||
'-strict', 'experimental',
|
|
||||||
'-fflags', '+genpts+discardcorrupt',
|
|
||||||
'-rtsp_transport', 'tcp',
|
|
||||||
'-stimeout', '5000000',
|
|
||||||
'-use_wallclock_as_timestamps', '1']),
|
|
||||||
'output_args': FFMPEG_CONFIG.get('output_args',
|
|
||||||
['-f', 'rawvideo',
|
|
||||||
'-pix_fmt', 'yuv420p'])
|
|
||||||
}
|
|
||||||
|
|
||||||
GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
|
|
||||||
|
|
||||||
WEB_PORT = CONFIG.get('web_port', 5000)
|
|
||||||
DETECTORS = CONFIG.get('detectors', {'coral': {'type': 'edgetpu', 'device': 'usb'}})
|
|
||||||
|
|
||||||
class CameraWatchdog(threading.Thread):
|
|
||||||
def __init__(self, camera_processes, config, detectors, detection_queue, tracked_objects_queue, stop_event):
|
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self.camera_processes = camera_processes
|
|
||||||
self.config = config
|
|
||||||
self.detectors = detectors
|
|
||||||
self.detection_queue = detection_queue
|
|
||||||
self.tracked_objects_queue = tracked_objects_queue
|
|
||||||
self.stop_event = stop_event
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
time.sleep(10)
|
|
||||||
while True:
|
|
||||||
# wait a bit before checking
|
|
||||||
time.sleep(10)
|
|
||||||
|
|
||||||
if self.stop_event.is_set():
|
|
||||||
print(f"Exiting watchdog...")
|
|
||||||
break
|
|
||||||
|
|
||||||
now = datetime.datetime.now().timestamp()
|
|
||||||
|
|
||||||
# check the detection processes
|
|
||||||
for detector in self.detectors.values():
|
|
||||||
detection_start = detector.detection_start.value
|
|
||||||
if (detection_start > 0.0 and
|
|
||||||
now - detection_start > 10):
|
|
||||||
print("Detection appears to be stuck. Restarting detection process")
|
|
||||||
detector.start_or_restart()
|
|
||||||
elif not detector.detect_process.is_alive():
|
|
||||||
print("Detection appears to have stopped. Restarting detection process")
|
|
||||||
detector.start_or_restart()
|
|
||||||
|
|
||||||
# check the camera processes
|
|
||||||
for name, camera_process in self.camera_processes.items():
|
|
||||||
process = camera_process['process']
|
|
||||||
if not process.is_alive():
|
|
||||||
print(f"Track process for {name} is not alive. Starting again...")
|
|
||||||
camera_process['process_fps'].value = 0.0
|
|
||||||
camera_process['detection_fps'].value = 0.0
|
|
||||||
camera_process['read_start'].value = 0.0
|
|
||||||
process = mp.Process(target=track_camera, args=(name, self.config[name], camera_process['frame_queue'],
|
|
||||||
camera_process['frame_shape'], self.detection_queue, self.tracked_objects_queue,
|
|
||||||
camera_process['process_fps'], camera_process['detection_fps'],
|
|
||||||
camera_process['read_start'], self.stop_event))
|
|
||||||
process.daemon = True
|
|
||||||
camera_process['process'] = process
|
|
||||||
process.start()
|
|
||||||
print(f"Track process started for {name}: {process.pid}")
|
|
||||||
|
|
||||||
if not camera_process['capture_thread'].is_alive():
|
|
||||||
frame_shape = camera_process['frame_shape']
|
|
||||||
frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
|
|
||||||
ffmpeg_process = start_or_restart_ffmpeg(camera_process['ffmpeg_cmd'], frame_size)
|
|
||||||
camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, camera_process['frame_queue'],
|
|
||||||
camera_process['take_frame'], camera_process['camera_fps'], self.stop_event)
|
|
||||||
camera_capture.start()
|
|
||||||
camera_process['ffmpeg_process'] = ffmpeg_process
|
|
||||||
camera_process['capture_thread'] = camera_capture
|
|
||||||
elif now - camera_process['capture_thread'].current_frame.value > 5:
|
|
||||||
print(f"No frames received from {name} in 5 seconds. Exiting ffmpeg...")
|
|
||||||
ffmpeg_process = camera_process['ffmpeg_process']
|
|
||||||
ffmpeg_process.terminate()
|
|
||||||
try:
|
|
||||||
print("Waiting for ffmpeg to exit gracefully...")
|
|
||||||
ffmpeg_process.communicate(timeout=30)
|
|
||||||
except sp.TimeoutExpired:
|
|
||||||
print("FFmpeg didnt exit. Force killing...")
|
|
||||||
ffmpeg_process.kill()
|
|
||||||
ffmpeg_process.communicate()
|
|
||||||
|
|
||||||
def main():
|
|
||||||
stop_event = threading.Event()
|
|
||||||
# connect to mqtt and setup last will
|
|
||||||
def on_connect(client, userdata, flags, rc):
|
|
||||||
print("On connect called")
|
|
||||||
if rc != 0:
|
|
||||||
if rc == 3:
|
|
||||||
print ("MQTT Server unavailable")
|
|
||||||
elif rc == 4:
|
|
||||||
print ("MQTT Bad username or password")
|
|
||||||
elif rc == 5:
|
|
||||||
print ("MQTT Not authorized")
|
|
||||||
else:
|
|
||||||
print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
|
|
||||||
# publish a message to signal that the service is running
|
|
||||||
client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
|
|
||||||
client = mqtt.Client(client_id=MQTT_CLIENT_ID)
|
|
||||||
client.on_connect = on_connect
|
|
||||||
client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
|
|
||||||
if not MQTT_USER is None:
|
|
||||||
client.username_pw_set(MQTT_USER, password=MQTT_PASS)
|
|
||||||
client.connect(MQTT_HOST, MQTT_PORT, 60)
|
|
||||||
client.loop_start()
|
|
||||||
|
|
||||||
##
|
|
||||||
# Setup config defaults for cameras
|
|
||||||
##
|
|
||||||
for name, config in CONFIG['cameras'].items():
|
|
||||||
config['snapshots'] = {
|
|
||||||
'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
|
|
||||||
'draw_zones': config.get('snapshots', {}).get('draw_zones', False)
|
|
||||||
}
|
|
||||||
config['zones'] = config.get('zones', {})
|
|
||||||
|
|
||||||
# Queue for cameras to push tracked objects to
|
|
||||||
tracked_objects_queue = mp.Queue()
|
|
||||||
|
|
||||||
# Queue for clip processing
|
|
||||||
event_queue = mp.Queue()
|
|
||||||
|
|
||||||
# create the detection pipes and shms
|
|
||||||
out_events = {}
|
|
||||||
camera_shms = []
|
|
||||||
for name in CONFIG['cameras'].keys():
|
|
||||||
out_events[name] = mp.Event()
|
|
||||||
shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
|
|
||||||
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
|
|
||||||
camera_shms.append(shm_in)
|
|
||||||
camera_shms.append(shm_out)
|
|
||||||
|
|
||||||
detection_queue = mp.Queue()
|
|
||||||
|
|
||||||
detectors = {}
|
|
||||||
for name, detector in DETECTORS.items():
|
|
||||||
if detector['type'] == 'cpu':
|
|
||||||
detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device='cpu')
|
|
||||||
if detector['type'] == 'edgetpu':
|
|
||||||
detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device=detector['device'])
|
|
||||||
|
|
||||||
# create the camera processes
|
|
||||||
camera_processes = {}
|
|
||||||
for name, config in CONFIG['cameras'].items():
|
|
||||||
# Merge the ffmpeg config with the global config
|
|
||||||
ffmpeg = config.get('ffmpeg', {})
|
|
||||||
ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
|
|
||||||
ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
|
|
||||||
ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
|
|
||||||
ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
|
|
||||||
ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
|
|
||||||
if not config.get('fps') is None:
|
|
||||||
ffmpeg_output_args = ["-r", str(config.get('fps'))] + ffmpeg_output_args
|
|
||||||
if config.get('save_clips', {}).get('enabled', False):
|
|
||||||
ffmpeg_output_args = [
|
|
||||||
"-f",
|
|
||||||
"segment",
|
|
||||||
"-segment_time",
|
|
||||||
"10",
|
|
||||||
"-segment_format",
|
|
||||||
"mp4",
|
|
||||||
"-reset_timestamps",
|
|
||||||
"1",
|
|
||||||
"-strftime",
|
|
||||||
"1",
|
|
||||||
"-c",
|
|
||||||
"copy",
|
|
||||||
"-an",
|
|
||||||
"-map",
|
|
||||||
"0",
|
|
||||||
f"/cache/{name}-%Y%m%d%H%M%S.mp4"
|
|
||||||
] + ffmpeg_output_args
|
|
||||||
ffmpeg_cmd = (['ffmpeg'] +
|
|
||||||
ffmpeg_global_args +
|
|
||||||
ffmpeg_hwaccel_args +
|
|
||||||
ffmpeg_input_args +
|
|
||||||
['-i', ffmpeg_input] +
|
|
||||||
ffmpeg_output_args +
|
|
||||||
['pipe:'])
|
|
||||||
|
|
||||||
if 'width' in config and 'height' in config:
|
|
||||||
frame_shape = (config['height'], config['width'], 3)
|
|
||||||
else:
|
|
||||||
frame_shape = get_frame_shape(ffmpeg_input)
|
|
||||||
|
|
||||||
config['frame_shape'] = frame_shape
|
|
||||||
|
|
||||||
frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
|
|
||||||
take_frame = config.get('take_frame', 1)
|
|
||||||
|
|
||||||
detection_frame = mp.Value('d', 0.0)
|
|
||||||
|
|
||||||
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
|
|
||||||
frame_queue = mp.Queue(maxsize=2)
|
|
||||||
camera_fps = EventsPerSecond()
|
|
||||||
camera_fps.start()
|
|
||||||
camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, frame_queue, take_frame, camera_fps, stop_event)
|
|
||||||
camera_capture.start()
|
|
||||||
|
|
||||||
camera_processes[name] = {
|
|
||||||
'camera_fps': camera_fps,
|
|
||||||
'take_frame': take_frame,
|
|
||||||
'process_fps': mp.Value('d', 0.0),
|
|
||||||
'detection_fps': mp.Value('d', 0.0),
|
|
||||||
'detection_frame': detection_frame,
|
|
||||||
'read_start': mp.Value('d', 0.0),
|
|
||||||
'ffmpeg_process': ffmpeg_process,
|
|
||||||
'ffmpeg_cmd': ffmpeg_cmd,
|
|
||||||
'frame_queue': frame_queue,
|
|
||||||
'frame_shape': frame_shape,
|
|
||||||
'capture_thread': camera_capture
|
|
||||||
}
|
|
||||||
|
|
||||||
# merge global object config into camera object config
|
|
||||||
camera_objects_config = config.get('objects', {})
|
|
||||||
# get objects to track for camera
|
|
||||||
objects_to_track = camera_objects_config.get('track', GLOBAL_OBJECT_CONFIG.get('track', ['person']))
|
|
||||||
# get object filters
|
|
||||||
object_filters = camera_objects_config.get('filters', GLOBAL_OBJECT_CONFIG.get('filters', {}))
|
|
||||||
config['objects'] = {
|
|
||||||
'track': objects_to_track,
|
|
||||||
'filters': object_filters
|
|
||||||
}
|
|
||||||
|
|
||||||
camera_process = mp.Process(target=track_camera, args=(name, config, frame_queue, frame_shape,
|
|
||||||
detection_queue, out_events[name], tracked_objects_queue, camera_processes[name]['process_fps'],
|
|
||||||
camera_processes[name]['detection_fps'],
|
|
||||||
camera_processes[name]['read_start'], camera_processes[name]['detection_frame'], stop_event))
|
|
||||||
camera_process.daemon = True
|
|
||||||
camera_processes[name]['process'] = camera_process
|
|
||||||
|
|
||||||
# start the camera_processes
|
|
||||||
for name, camera_process in camera_processes.items():
|
|
||||||
camera_process['process'].start()
|
|
||||||
print(f"Camera_process started for {name}: {camera_process['process'].pid}")
|
|
||||||
|
|
||||||
event_processor = EventProcessor(CONFIG, camera_processes, '/cache', '/clips', event_queue, stop_event)
|
|
||||||
event_processor.start()
|
|
||||||
|
|
||||||
object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
|
|
||||||
object_processor.start()
|
|
||||||
|
|
||||||
camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], detectors, detection_queue, tracked_objects_queue, stop_event)
|
|
||||||
camera_watchdog.start()
|
|
||||||
|
|
||||||
def receiveSignal(signalNumber, frame):
|
|
||||||
print('Received:', signalNumber)
|
|
||||||
stop_event.set()
|
|
||||||
event_processor.join()
|
|
||||||
object_processor.join()
|
|
||||||
camera_watchdog.join()
|
|
||||||
for camera_name, camera_process in camera_processes.items():
|
|
||||||
camera_process['capture_thread'].join()
|
|
||||||
# cleanup the frame queue
|
|
||||||
while not camera_process['frame_queue'].empty():
|
|
||||||
frame_time = camera_process['frame_queue'].get()
|
|
||||||
shm = mp.shared_memory.SharedMemory(name=f"{camera_name}{frame_time}")
|
|
||||||
shm.close()
|
|
||||||
shm.unlink()
|
|
||||||
|
|
||||||
for detector in detectors:
|
|
||||||
detector.stop()
|
|
||||||
for shm in camera_shms:
|
|
||||||
shm.close()
|
|
||||||
shm.unlink()
|
|
||||||
sys.exit()
|
|
||||||
|
|
||||||
signal.signal(signal.SIGTERM, receiveSignal)
|
|
||||||
signal.signal(signal.SIGINT, receiveSignal)
|
|
||||||
|
|
||||||
# create a flask app that encodes frames a mjpeg on demand
|
|
||||||
app = Flask(__name__)
|
|
||||||
log = logging.getLogger('werkzeug')
|
|
||||||
log.setLevel(logging.ERROR)
|
|
||||||
|
|
||||||
@app.route('/')
|
|
||||||
def ishealthy():
|
|
||||||
# return a healh
|
|
||||||
return "Frigate is running. Alive and healthy!"
|
|
||||||
|
|
||||||
@app.route('/debug/stack')
|
|
||||||
def processor_stack():
|
|
||||||
frame = sys._current_frames().get(object_processor.ident, None)
|
|
||||||
if frame:
|
|
||||||
return "<br>".join(traceback.format_stack(frame)), 200
|
|
||||||
else:
|
|
||||||
return "no frame found", 200
|
|
||||||
|
|
||||||
@app.route('/debug/print_stack')
|
|
||||||
def print_stack():
|
|
||||||
pid = int(request.args.get('pid', 0))
|
|
||||||
if pid == 0:
|
|
||||||
return "missing pid", 200
|
|
||||||
else:
|
|
||||||
os.kill(pid, signal.SIGUSR1)
|
|
||||||
return "check logs", 200
|
|
||||||
|
|
||||||
@app.route('/debug/stats')
|
|
||||||
def stats():
|
|
||||||
stats = {}
|
|
||||||
|
|
||||||
total_detection_fps = 0
|
|
||||||
|
|
||||||
for name, camera_stats in camera_processes.items():
|
|
||||||
total_detection_fps += camera_stats['detection_fps'].value
|
|
||||||
capture_thread = camera_stats['capture_thread']
|
|
||||||
stats[name] = {
|
|
||||||
'camera_fps': round(capture_thread.fps.eps(), 2),
|
|
||||||
'process_fps': round(camera_stats['process_fps'].value, 2),
|
|
||||||
'skipped_fps': round(capture_thread.skipped_fps.eps(), 2),
|
|
||||||
'detection_fps': round(camera_stats['detection_fps'].value, 2),
|
|
||||||
'read_start': camera_stats['read_start'].value,
|
|
||||||
'pid': camera_stats['process'].pid,
|
|
||||||
'ffmpeg_pid': camera_stats['ffmpeg_process'].pid,
|
|
||||||
'frame_info': {
|
|
||||||
'read': capture_thread.current_frame.value,
|
|
||||||
'detect': camera_stats['detection_frame'].value,
|
|
||||||
'process': object_processor.camera_data[name]['current_frame_time']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stats['detectors'] = {}
|
|
||||||
for name, detector in detectors.items():
|
|
||||||
stats['detectors'][name] = {
|
|
||||||
'inference_speed': round(detector.avg_inference_speed.value*1000, 2),
|
|
||||||
'detection_start': detector.detection_start.value,
|
|
||||||
'pid': detector.detect_process.pid
|
|
||||||
}
|
|
||||||
stats['detection_fps'] = round(total_detection_fps, 2)
|
|
||||||
|
|
||||||
return jsonify(stats)
|
|
||||||
|
|
||||||
@app.route('/<camera_name>/<label>/best.jpg')
|
|
||||||
def best(camera_name, label):
|
|
||||||
if camera_name in CONFIG['cameras']:
|
|
||||||
best_object = object_processor.get_best(camera_name, label)
|
|
||||||
best_frame = best_object.get('frame', np.zeros((720,1280,3), np.uint8))
|
|
||||||
|
|
||||||
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
|
|
||||||
|
|
||||||
crop = bool(request.args.get('crop', 0, type=int))
|
|
||||||
if crop:
|
|
||||||
region = best_object.get('region', [0,0,300,300])
|
|
||||||
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
|
|
||||||
|
|
||||||
height = int(request.args.get('h', str(best_frame.shape[0])))
|
|
||||||
width = int(height*best_frame.shape[1]/best_frame.shape[0])
|
|
||||||
|
|
||||||
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
|
||||||
ret, jpg = cv2.imencode('.jpg', best_frame)
|
|
||||||
response = make_response(jpg.tobytes())
|
|
||||||
response.headers['Content-Type'] = 'image/jpg'
|
|
||||||
return response
|
|
||||||
else:
|
|
||||||
return "Camera named {} not found".format(camera_name), 404
|
|
||||||
|
|
||||||
@app.route('/<camera_name>')
|
|
||||||
def mjpeg_feed(camera_name):
|
|
||||||
fps = int(request.args.get('fps', '3'))
|
|
||||||
height = int(request.args.get('h', '360'))
|
|
||||||
if camera_name in CONFIG['cameras']:
|
|
||||||
# return a multipart response
|
|
||||||
return Response(imagestream(camera_name, fps, height),
|
|
||||||
mimetype='multipart/x-mixed-replace; boundary=frame')
|
|
||||||
else:
|
|
||||||
return "Camera named {} not found".format(camera_name), 404
|
|
||||||
|
|
||||||
@app.route('/<camera_name>/latest.jpg')
|
|
||||||
def latest_frame(camera_name):
|
|
||||||
if camera_name in CONFIG['cameras']:
|
|
||||||
# max out at specified FPS
|
|
||||||
frame = object_processor.get_current_frame(camera_name)
|
|
||||||
if frame is None:
|
|
||||||
frame = np.zeros((720,1280,3), np.uint8)
|
|
||||||
|
|
||||||
height = int(request.args.get('h', str(frame.shape[0])))
|
|
||||||
width = int(height*frame.shape[1]/frame.shape[0])
|
|
||||||
|
|
||||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
|
||||||
|
|
||||||
ret, jpg = cv2.imencode('.jpg', frame)
|
|
||||||
response = make_response(jpg.tobytes())
|
|
||||||
response.headers['Content-Type'] = 'image/jpg'
|
|
||||||
return response
|
|
||||||
else:
|
|
||||||
return "Camera named {} not found".format(camera_name), 404
|
|
||||||
|
|
||||||
def imagestream(camera_name, fps, height):
|
|
||||||
while True:
|
|
||||||
# max out at specified FPS
|
|
||||||
time.sleep(1/fps)
|
|
||||||
frame = object_processor.get_current_frame(camera_name, draw=True)
|
|
||||||
if frame is None:
|
|
||||||
frame = np.zeros((height,int(height*16/9),3), np.uint8)
|
|
||||||
|
|
||||||
width = int(height*frame.shape[1]/frame.shape[0])
|
|
||||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
|
|
||||||
|
|
||||||
ret, jpg = cv2.imencode('.jpg', frame)
|
|
||||||
yield (b'--frame\r\n'
|
|
||||||
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
|
|
||||||
|
|
||||||
app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
|
|
||||||
|
|
||||||
object_processor.join()
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
@@ -17,6 +17,6 @@ RUN apt-get -qq update \
|
|||||||
libtiff5 \
|
libtiff5 \
|
||||||
libdc1394-22 \
|
libdc1394-22 \
|
||||||
## Tensorflow lite
|
## Tensorflow lite
|
||||||
&& pip3 install https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp38-cp38-linux_aarch64.whl \
|
&& pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_aarch64.whl \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& (apt-get autoremove -y; apt-get autoclean -y)
|
&& (apt-get autoremove -y; apt-get autoclean -y)
|
||||||
@@ -1,16 +1,18 @@
|
|||||||
FROM frigate-base
|
FROM frigate-base
|
||||||
LABEL maintainer "blakeb@blakeshome.com"
|
LABEL maintainer "blakeb@blakeshome.com"
|
||||||
|
|
||||||
|
# By default, use the i965 driver
|
||||||
|
ENV LIBVA_DRIVER_NAME=i965
|
||||||
# Install packages for apt repo
|
# Install packages for apt repo
|
||||||
RUN apt-get -qq update \
|
RUN apt-get -qq update \
|
||||||
&& apt-get -qq install --no-install-recommends -y \
|
&& apt-get -qq install --no-install-recommends -y \
|
||||||
# ffmpeg dependencies
|
# ffmpeg dependencies
|
||||||
libgomp1 \
|
libgomp1 \
|
||||||
# VAAPI drivers for Intel hardware accel
|
# VAAPI drivers for Intel hardware accel
|
||||||
libva-drm2 libva2 i965-va-driver vainfo \
|
libva-drm2 libva2 i965-va-driver vainfo intel-media-va-driver mesa-va-drivers \
|
||||||
## Tensorflow lite
|
## Tensorflow lite
|
||||||
&& wget -q https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp38-cp38-linux_x86_64.whl \
|
&& wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
&& python3.8 -m pip install tflite_runtime-2.1.0.post1-cp38-cp38-linux_x86_64.whl \
|
&& python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
&& rm tflite_runtime-2.1.0.post1-cp38-cp38-linux_x86_64.whl \
|
&& rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& (apt-get autoremove -y; apt-get autoclean -y)
|
&& (apt-get autoremove -y; apt-get autoclean -y)
|
||||||
47
docker/Dockerfile.amd64nvidia
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
FROM frigate-base
|
||||||
|
LABEL maintainer "blakeb@blakeshome.com"
|
||||||
|
|
||||||
|
# Install packages for apt repo
|
||||||
|
RUN apt-get -qq update \
|
||||||
|
&& apt-get -qq install --no-install-recommends -y \
|
||||||
|
# ffmpeg dependencies
|
||||||
|
libgomp1 \
|
||||||
|
## Tensorflow lite
|
||||||
|
&& wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
|
&& python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
|
&& rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
|
||||||
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
|
&& (apt-get autoremove -y; apt-get autoclean -y)
|
||||||
|
|
||||||
|
|
||||||
|
# nvidia layer (see https://gitlab.com/nvidia/container-images/cuda/blob/master/dist/11.1/ubuntu20.04-x86_64/base/Dockerfile)
|
||||||
|
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
gnupg2 curl ca-certificates && \
|
||||||
|
curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \
|
||||||
|
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \
|
||||||
|
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && \
|
||||||
|
apt-get purge --autoremove -y curl \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
ENV CUDA_VERSION 11.1.1
|
||||||
|
|
||||||
|
# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
cuda-cudart-11-1=11.1.74-1 \
|
||||||
|
cuda-compat-11-1 \
|
||||||
|
&& ln -s cuda-11.1 /usr/local/cuda && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Required for nvidia-docker v1
|
||||||
|
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
|
||||||
|
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
|
||||||
|
|
||||||
|
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
|
||||||
|
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||||
|
|
||||||
|
# nvidia-container-runtime
|
||||||
|
ENV NVIDIA_VISIBLE_DEVICES all
|
||||||
|
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
|
||||||
|
ENV NVIDIA_REQUIRE_CUDA "cuda>=11.1 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 brand=tesla,driver>=450,driver<451"
|
||||||
@@ -19,6 +19,6 @@ RUN apt-get -qq update \
|
|||||||
libaom0 \
|
libaom0 \
|
||||||
libx265-179 \
|
libx265-179 \
|
||||||
## Tensorflow lite
|
## Tensorflow lite
|
||||||
&& pip3 install https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp38-cp38-linux_armv7l.whl \
|
&& pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_armv7l.whl \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
&& (apt-get autoremove -y; apt-get autoclean -y)
|
&& (apt-get autoremove -y; apt-get autoclean -y)
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
ARG ARCH=amd64
|
ARG ARCH=amd64
|
||||||
FROM blakeblackshear/frigate-wheels:${ARCH} as wheels
|
FROM blakeblackshear/frigate-wheels:${ARCH} as wheels
|
||||||
FROM blakeblackshear/frigate-ffmpeg:${ARCH} as ffmpeg
|
FROM blakeblackshear/frigate-ffmpeg:1.0.0-${ARCH} as ffmpeg
|
||||||
|
|
||||||
FROM ubuntu:20.04
|
FROM ubuntu:20.04
|
||||||
LABEL maintainer "blakeb@blakeshome.com"
|
LABEL maintainer "blakeb@blakeshome.com"
|
||||||
@@ -9,10 +9,14 @@ COPY --from=ffmpeg /usr/local /usr/local/
|
|||||||
|
|
||||||
COPY --from=wheels /wheels/. /wheels/
|
COPY --from=wheels /wheels/. /wheels/
|
||||||
|
|
||||||
|
ENV FLASK_ENV=development
|
||||||
|
# ENV FONTCONFIG_PATH=/etc/fonts
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
# Install packages for apt repo
|
# Install packages for apt repo
|
||||||
RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
|
RUN apt-get -qq update \
|
||||||
gnupg wget unzip tzdata \
|
&& apt-get upgrade -y \
|
||||||
|
&& apt-get -qq install --no-install-recommends -y \
|
||||||
|
gnupg wget unzip tzdata nginx libnginx-mod-rtmp \
|
||||||
&& apt-get -qq install --no-install-recommends -y \
|
&& apt-get -qq install --no-install-recommends -y \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
&& pip3 install -U /wheels/*.whl \
|
&& pip3 install -U /wheels/*.whl \
|
||||||
@@ -20,22 +24,30 @@ RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
|
|||||||
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
|
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
|
||||||
&& echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections \
|
&& echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections \
|
||||||
&& apt-get -qq update && apt-get -qq install --no-install-recommends -y \
|
&& apt-get -qq update && apt-get -qq install --no-install-recommends -y \
|
||||||
libedgetpu1-max \
|
libedgetpu1-max=15.0 \
|
||||||
&& rm -rf /var/lib/apt/lists/* /wheels \
|
&& rm -rf /var/lib/apt/lists/* /wheels \
|
||||||
&& (apt-get autoremove -y; apt-get autoclean -y)
|
&& (apt-get autoremove -y; apt-get autoclean -y)
|
||||||
|
|
||||||
|
RUN pip3 install \
|
||||||
|
peewee \
|
||||||
|
zeroconf \
|
||||||
|
voluptuous
|
||||||
|
|
||||||
|
COPY nginx/nginx.conf /etc/nginx/nginx.conf
|
||||||
|
|
||||||
# get model and labels
|
# get model and labels
|
||||||
ARG MODEL_REFS=7064b94dd5b996189242320359dbab8b52c94a84
|
ARG MODEL_REFS=7064b94dd5b996189242320359dbab8b52c94a84
|
||||||
COPY labelmap.txt /labelmap.txt
|
COPY labelmap.txt /labelmap.txt
|
||||||
RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite -O /edgetpu_model.tflite
|
RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite -O /edgetpu_model.tflite
|
||||||
RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess.tflite -O /cpu_model.tflite
|
RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess.tflite -O /cpu_model.tflite
|
||||||
|
|
||||||
RUN mkdir /cache /clips
|
|
||||||
|
|
||||||
WORKDIR /opt/frigate/
|
WORKDIR /opt/frigate/
|
||||||
ADD frigate frigate/
|
ADD frigate frigate/
|
||||||
COPY detect_objects.py .
|
|
||||||
COPY benchmark.py .
|
|
||||||
COPY process_clip.py .
|
|
||||||
|
|
||||||
CMD ["python3", "-u", "detect_objects.py"]
|
COPY run.sh /run.sh
|
||||||
|
RUN chmod +x /run.sh
|
||||||
|
|
||||||
|
EXPOSE 5000
|
||||||
|
EXPOSE 1935
|
||||||
|
|
||||||
|
CMD ["/run.sh"]
|
||||||
|
|||||||
@@ -18,12 +18,10 @@ FROM base as build
|
|||||||
ENV FFMPEG_VERSION=4.3.1 \
|
ENV FFMPEG_VERSION=4.3.1 \
|
||||||
AOM_VERSION=v1.0.0 \
|
AOM_VERSION=v1.0.0 \
|
||||||
FDKAAC_VERSION=0.1.5 \
|
FDKAAC_VERSION=0.1.5 \
|
||||||
FONTCONFIG_VERSION=2.12.4 \
|
|
||||||
FREETYPE_VERSION=2.5.5 \
|
FREETYPE_VERSION=2.5.5 \
|
||||||
FRIBIDI_VERSION=0.19.7 \
|
FRIBIDI_VERSION=0.19.7 \
|
||||||
KVAZAAR_VERSION=1.2.0 \
|
KVAZAAR_VERSION=1.2.0 \
|
||||||
LAME_VERSION=3.100 \
|
LAME_VERSION=3.100 \
|
||||||
LIBASS_VERSION=0.13.7 \
|
|
||||||
LIBPTHREAD_STUBS_VERSION=0.4 \
|
LIBPTHREAD_STUBS_VERSION=0.4 \
|
||||||
LIBVIDSTAB_VERSION=1.1.0 \
|
LIBVIDSTAB_VERSION=1.1.0 \
|
||||||
LIBXCB_VERSION=1.13.1 \
|
LIBXCB_VERSION=1.13.1 \
|
||||||
@@ -42,22 +40,17 @@ ENV FFMPEG_VERSION=4.3.1 \
|
|||||||
XORG_MACROS_VERSION=1.19.2 \
|
XORG_MACROS_VERSION=1.19.2 \
|
||||||
XPROTO_VERSION=7.0.31 \
|
XPROTO_VERSION=7.0.31 \
|
||||||
XVID_VERSION=1.3.4 \
|
XVID_VERSION=1.3.4 \
|
||||||
LIBXML2_VERSION=2.9.10 \
|
|
||||||
LIBBLURAY_VERSION=1.1.2 \
|
|
||||||
LIBZMQ_VERSION=4.3.2 \
|
LIBZMQ_VERSION=4.3.2 \
|
||||||
SRC=/usr/local
|
SRC=/usr/local
|
||||||
|
|
||||||
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
||||||
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
||||||
ARG LIBASS_SHA256SUM="8fadf294bf701300d4605e6f1d92929304187fca4b8d8a47889315526adbafd7 0.13.7.tar.gz"
|
|
||||||
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
||||||
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
||||||
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
||||||
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
||||||
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
||||||
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
||||||
ARG LIBXML2_SHA256SUM="f07dab13bf42d2b8db80620cce7419b3b87827cc937c8bb20fe13b8571ee9501 libxml2-v2.9.10.tar.gz"
|
|
||||||
ARG LIBBLURAY_SHA256SUM="a3dd452239b100dc9da0d01b30e1692693e2a332a7d29917bf84bb10ea7c0b42 libbluray-1.1.2.tar.bz2"
|
|
||||||
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
|
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
|
||||||
|
|
||||||
|
|
||||||
@@ -287,30 +280,7 @@ RUN \
|
|||||||
make -j1 && \
|
make -j1 && \
|
||||||
make -j $(nproc) install && \
|
make -j $(nproc) install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
## fontconfig https://www.freedesktop.org/wiki/Software/fontconfig/
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/fontconfig && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://www.freedesktop.org/software/fontconfig/release/fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
tar -jx --strip-components=1 -f fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## libass https://github.com/libass/libass
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libass && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://github.com/libass/libass/archive/${LIBASS_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBASS_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -zx --strip-components=1 -f ${LIBASS_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## kvazaar https://github.com/ultravideo/kvazaar
|
## kvazaar https://github.com/ultravideo/kvazaar
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/kvazaar && \
|
DIR=/tmp/kvazaar && \
|
||||||
@@ -407,32 +377,6 @@ RUN \
|
|||||||
make -j $(nproc) install && \
|
make -j $(nproc) install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
|
|
||||||
## libxml2 - for libbluray
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libxml2 && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://gitlab.gnome.org/GNOME/libxml2/-/archive/v${LIBXML2_VERSION}/libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBXML2_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -xz --strip-components=1 -f libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh --prefix="${PREFIX}" --with-ftp=no --with-http=no --with-python=no && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libbluray - Requires libxml, freetype, and fontconfig
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libbluray && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://download.videolan.org/pub/videolan/libbluray/${LIBBLURAY_VERSION}/libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
echo ${LIBBLURAY_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -jx --strip-components=1 -f libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-examples --disable-bdjava-jar --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libzmq https://github.com/zeromq/libzmq/
|
## libzmq https://github.com/zeromq/libzmq/
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/libzmq && \
|
DIR=/tmp/libzmq && \
|
||||||
@@ -465,8 +409,6 @@ RUN \
|
|||||||
--enable-libopencore-amrnb \
|
--enable-libopencore-amrnb \
|
||||||
--enable-libopencore-amrwb \
|
--enable-libopencore-amrwb \
|
||||||
--enable-gpl \
|
--enable-gpl \
|
||||||
--enable-libass \
|
|
||||||
--enable-fontconfig \
|
|
||||||
--enable-libfreetype \
|
--enable-libfreetype \
|
||||||
--enable-libvidstab \
|
--enable-libvidstab \
|
||||||
--enable-libmp3lame \
|
--enable-libmp3lame \
|
||||||
@@ -485,7 +427,6 @@ RUN \
|
|||||||
--enable-postproc \
|
--enable-postproc \
|
||||||
--enable-small \
|
--enable-small \
|
||||||
--enable-version3 \
|
--enable-version3 \
|
||||||
--enable-libbluray \
|
|
||||||
--enable-libzmq \
|
--enable-libzmq \
|
||||||
--extra-libs=-ldl \
|
--extra-libs=-ldl \
|
||||||
--prefix="${PREFIX}" \
|
--prefix="${PREFIX}" \
|
||||||
@@ -17,12 +17,10 @@ FROM base as build
|
|||||||
ENV FFMPEG_VERSION=4.3.1 \
|
ENV FFMPEG_VERSION=4.3.1 \
|
||||||
AOM_VERSION=v1.0.0 \
|
AOM_VERSION=v1.0.0 \
|
||||||
FDKAAC_VERSION=0.1.5 \
|
FDKAAC_VERSION=0.1.5 \
|
||||||
FONTCONFIG_VERSION=2.12.4 \
|
|
||||||
FREETYPE_VERSION=2.5.5 \
|
FREETYPE_VERSION=2.5.5 \
|
||||||
FRIBIDI_VERSION=0.19.7 \
|
FRIBIDI_VERSION=0.19.7 \
|
||||||
KVAZAAR_VERSION=1.2.0 \
|
KVAZAAR_VERSION=1.2.0 \
|
||||||
LAME_VERSION=3.100 \
|
LAME_VERSION=3.100 \
|
||||||
LIBASS_VERSION=0.13.7 \
|
|
||||||
LIBPTHREAD_STUBS_VERSION=0.4 \
|
LIBPTHREAD_STUBS_VERSION=0.4 \
|
||||||
LIBVIDSTAB_VERSION=1.1.0 \
|
LIBVIDSTAB_VERSION=1.1.0 \
|
||||||
LIBXCB_VERSION=1.13.1 \
|
LIBXCB_VERSION=1.13.1 \
|
||||||
@@ -41,22 +39,17 @@ ENV FFMPEG_VERSION=4.3.1 \
|
|||||||
XORG_MACROS_VERSION=1.19.2 \
|
XORG_MACROS_VERSION=1.19.2 \
|
||||||
XPROTO_VERSION=7.0.31 \
|
XPROTO_VERSION=7.0.31 \
|
||||||
XVID_VERSION=1.3.4 \
|
XVID_VERSION=1.3.4 \
|
||||||
LIBXML2_VERSION=2.9.10 \
|
|
||||||
LIBBLURAY_VERSION=1.1.2 \
|
|
||||||
LIBZMQ_VERSION=4.3.2 \
|
LIBZMQ_VERSION=4.3.2 \
|
||||||
SRC=/usr/local
|
SRC=/usr/local
|
||||||
|
|
||||||
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
||||||
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
||||||
ARG LIBASS_SHA256SUM="8fadf294bf701300d4605e6f1d92929304187fca4b8d8a47889315526adbafd7 0.13.7.tar.gz"
|
|
||||||
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
||||||
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
||||||
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
||||||
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
||||||
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
||||||
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
||||||
ARG LIBXML2_SHA256SUM="f07dab13bf42d2b8db80620cce7419b3b87827cc937c8bb20fe13b8571ee9501 libxml2-v2.9.10.tar.gz"
|
|
||||||
ARG LIBBLURAY_SHA256SUM="a3dd452239b100dc9da0d01b30e1692693e2a332a7d29917bf84bb10ea7c0b42 libbluray-1.1.2.tar.bz2"
|
|
||||||
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
|
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
|
||||||
|
|
||||||
|
|
||||||
@@ -281,30 +274,6 @@ RUN \
|
|||||||
make -j1 && \
|
make -j1 && \
|
||||||
make install && \
|
make install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
## fontconfig https://www.freedesktop.org/wiki/Software/fontconfig/
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/fontconfig && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://www.freedesktop.org/software/fontconfig/release/fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
tar -jx --strip-components=1 -f fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make && \
|
|
||||||
make install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## libass https://github.com/libass/libass
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libass && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://github.com/libass/libass/archive/${LIBASS_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBASS_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -zx --strip-components=1 -f ${LIBASS_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make && \
|
|
||||||
make install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## kvazaar https://github.com/ultravideo/kvazaar
|
## kvazaar https://github.com/ultravideo/kvazaar
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/kvazaar && \
|
DIR=/tmp/kvazaar && \
|
||||||
@@ -399,32 +368,6 @@ RUN \
|
|||||||
make install && \
|
make install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
|
|
||||||
## libxml2 - for libbluray
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libxml2 && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://gitlab.gnome.org/GNOME/libxml2/-/archive/v${LIBXML2_VERSION}/libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBXML2_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -xz --strip-components=1 -f libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh --prefix="${PREFIX}" --with-ftp=no --with-http=no --with-python=no && \
|
|
||||||
make && \
|
|
||||||
make install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libbluray - Requires libxml, freetype, and fontconfig
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libbluray && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://download.videolan.org/pub/videolan/libbluray/${LIBBLURAY_VERSION}/libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
echo ${LIBBLURAY_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -jx --strip-components=1 -f libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-examples --disable-bdjava-jar --disable-static --enable-shared && \
|
|
||||||
make && \
|
|
||||||
make install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libzmq https://github.com/zeromq/libzmq/
|
## libzmq https://github.com/zeromq/libzmq/
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/libzmq && \
|
DIR=/tmp/libzmq && \
|
||||||
@@ -459,8 +402,6 @@ RUN \
|
|||||||
--enable-libopencore-amrnb \
|
--enable-libopencore-amrnb \
|
||||||
--enable-libopencore-amrwb \
|
--enable-libopencore-amrwb \
|
||||||
--enable-gpl \
|
--enable-gpl \
|
||||||
--enable-libass \
|
|
||||||
--enable-fontconfig \
|
|
||||||
--enable-libfreetype \
|
--enable-libfreetype \
|
||||||
--enable-libvidstab \
|
--enable-libvidstab \
|
||||||
--enable-libmp3lame \
|
--enable-libmp3lame \
|
||||||
@@ -479,7 +420,6 @@ RUN \
|
|||||||
--enable-postproc \
|
--enable-postproc \
|
||||||
--enable-small \
|
--enable-small \
|
||||||
--enable-version3 \
|
--enable-version3 \
|
||||||
--enable-libbluray \
|
|
||||||
--enable-libzmq \
|
--enable-libzmq \
|
||||||
--extra-libs=-ldl \
|
--extra-libs=-ldl \
|
||||||
--prefix="${PREFIX}" \
|
--prefix="${PREFIX}" \
|
||||||
@@ -522,5 +462,5 @@ COPY --from=build /usr/local /usr/local/
|
|||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
apt-get update -y && \
|
apt-get update -y && \
|
||||||
apt-get install -y --no-install-recommends libva-drm2 libva2 i965-va-driver && \
|
apt-get install -y --no-install-recommends libva-drm2 libva2 i965-va-driver mesa-va-drivers && \
|
||||||
rm -rf /var/lib/apt/lists/*
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|||||||
549
docker/Dockerfile.ffmpeg.amd64nvidia
Normal file
@@ -0,0 +1,549 @@
|
|||||||
|
# inspired by https://github.com/jrottenberg/ffmpeg/blob/master/docker-images/4.3/ubuntu1804/Dockerfile
|
||||||
|
|
||||||
|
# ffmpeg - http://ffmpeg.org/download.html
|
||||||
|
#
|
||||||
|
# From https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
|
||||||
|
#
|
||||||
|
# https://hub.docker.com/r/jrottenberg/ffmpeg/
|
||||||
|
#
|
||||||
|
#
|
||||||
|
|
||||||
|
FROM nvidia/cuda:11.1-devel-ubuntu20.04 AS devel-base
|
||||||
|
|
||||||
|
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
WORKDIR /tmp/workdir
|
||||||
|
|
||||||
|
RUN apt-get -yqq update && \
|
||||||
|
apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
|
||||||
|
apt-get autoremove -y && \
|
||||||
|
apt-get clean -y
|
||||||
|
|
||||||
|
FROM nvidia/cuda:11.1-runtime-ubuntu20.04 AS runtime-base
|
||||||
|
|
||||||
|
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
WORKDIR /tmp/workdir
|
||||||
|
|
||||||
|
RUN apt-get -yqq update && \
|
||||||
|
apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 libxcb-shape0-dev && \
|
||||||
|
apt-get autoremove -y && \
|
||||||
|
apt-get clean -y
|
||||||
|
|
||||||
|
|
||||||
|
FROM devel-base as build
|
||||||
|
|
||||||
|
ENV NVIDIA_HEADERS_VERSION=9.1.23.1
|
||||||
|
|
||||||
|
ENV FFMPEG_VERSION=4.3.1 \
|
||||||
|
AOM_VERSION=v1.0.0 \
|
||||||
|
FDKAAC_VERSION=0.1.5 \
|
||||||
|
FREETYPE_VERSION=2.5.5 \
|
||||||
|
FRIBIDI_VERSION=0.19.7 \
|
||||||
|
KVAZAAR_VERSION=1.2.0 \
|
||||||
|
LAME_VERSION=3.100 \
|
||||||
|
LIBPTHREAD_STUBS_VERSION=0.4 \
|
||||||
|
LIBVIDSTAB_VERSION=1.1.0 \
|
||||||
|
LIBXCB_VERSION=1.13.1 \
|
||||||
|
XCBPROTO_VERSION=1.13 \
|
||||||
|
OGG_VERSION=1.3.2 \
|
||||||
|
OPENCOREAMR_VERSION=0.1.5 \
|
||||||
|
OPUS_VERSION=1.2 \
|
||||||
|
OPENJPEG_VERSION=2.1.2 \
|
||||||
|
THEORA_VERSION=1.1.1 \
|
||||||
|
VORBIS_VERSION=1.3.5 \
|
||||||
|
VPX_VERSION=1.8.0 \
|
||||||
|
WEBP_VERSION=1.0.2 \
|
||||||
|
X264_VERSION=20170226-2245-stable \
|
||||||
|
X265_VERSION=3.1.1 \
|
||||||
|
XAU_VERSION=1.0.9 \
|
||||||
|
XORG_MACROS_VERSION=1.19.2 \
|
||||||
|
XPROTO_VERSION=7.0.31 \
|
||||||
|
XVID_VERSION=1.3.4 \
|
||||||
|
LIBZMQ_VERSION=4.3.2 \
|
||||||
|
LIBSRT_VERSION=1.4.1 \
|
||||||
|
LIBARIBB24_VERSION=1.0.3 \
|
||||||
|
LIBPNG_VERSION=1.6.9 \
|
||||||
|
SRC=/usr/local
|
||||||
|
|
||||||
|
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
||||||
|
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
||||||
|
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
||||||
|
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
||||||
|
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
||||||
|
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
||||||
|
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
||||||
|
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
||||||
|
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
|
||||||
|
ARG LIBARIBB24_SHA256SUM="f61560738926e57f9173510389634d8c06cabedfa857db4b28fb7704707ff128 v1.0.3.tar.gz"
|
||||||
|
|
||||||
|
|
||||||
|
ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
|
||||||
|
ARG MAKEFLAGS="-j2"
|
||||||
|
ARG PKG_CONFIG_PATH="/opt/ffmpeg/share/pkgconfig:/opt/ffmpeg/lib/pkgconfig:/opt/ffmpeg/lib64/pkgconfig"
|
||||||
|
ARG PREFIX=/opt/ffmpeg
|
||||||
|
ARG LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64"
|
||||||
|
|
||||||
|
|
||||||
|
RUN buildDeps="autoconf \
|
||||||
|
automake \
|
||||||
|
cmake \
|
||||||
|
curl \
|
||||||
|
bzip2 \
|
||||||
|
libexpat1-dev \
|
||||||
|
g++ \
|
||||||
|
gcc \
|
||||||
|
git \
|
||||||
|
gperf \
|
||||||
|
libtool \
|
||||||
|
make \
|
||||||
|
nasm \
|
||||||
|
perl \
|
||||||
|
pkg-config \
|
||||||
|
python \
|
||||||
|
libssl-dev \
|
||||||
|
yasm \
|
||||||
|
zlib1g-dev" && \
|
||||||
|
apt-get -yqq update && \
|
||||||
|
apt-get install -yq --no-install-recommends ${buildDeps}
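## nv-codec-headers https://github.com/FFmpeg/nv-codec-headers - ffnvcodec headers needed for the nvenc/cuvid/npp flags in the ffmpeg build below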
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/nv-codec-headers && \
|
||||||
|
git clone https://github.com/FFmpeg/nv-codec-headers ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
git checkout n${NVIDIA_HEADERS_VERSION} && \
|
||||||
|
make PREFIX="${PREFIX}" && \
|
||||||
|
make install PREFIX="${PREFIX}" && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## opencore-amr https://sourceforge.net/projects/opencore-amr/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/opencore-amr && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://versaweb.dl.sourceforge.net/project/opencore-amr/opencore-amr/opencore-amr-${OPENCOREAMR_VERSION}.tar.gz | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## x264 http://www.videolan.org/developers/x264.html
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/x264 && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-${X264_VERSION}.tar.bz2 | \
|
||||||
|
tar -jx --strip-components=1 && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared --enable-pic --disable-cli && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### x265 http://x265.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/x265 && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://download.videolan.org/pub/videolan/x265/x265_${X265_VERSION}.tar.gz | \
|
||||||
|
tar -zx && \
|
||||||
|
cd x265_${X265_VERSION}/build/linux && \
|
||||||
|
sed -i "/-DEXTRA_LIB/ s/$/ -DCMAKE_INSTALL_PREFIX=\${PREFIX}/" multilib.sh && \
|
||||||
|
sed -i "/^cmake/ s/$/ -DENABLE_CLI=OFF/" multilib.sh && \
|
||||||
|
./multilib.sh && \
|
||||||
|
make -C 8bit install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libogg https://www.xiph.org/ogg/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/ogg && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO http://downloads.xiph.org/releases/ogg/libogg-${OGG_VERSION}.tar.gz && \
|
||||||
|
echo ${OGG_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f libogg-${OGG_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libopus https://www.opus-codec.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/opus && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
|
||||||
|
echo ${OPUS_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz && \
|
||||||
|
autoreconf -fiv && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libvorbis https://xiph.org/vorbis/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/vorbis && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO http://downloads.xiph.org/releases/vorbis/libvorbis-${VORBIS_VERSION}.tar.gz && \
|
||||||
|
echo ${VORBIS_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f libvorbis-${VORBIS_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libtheora http://www.theora.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/theora && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO http://downloads.xiph.org/releases/theora/libtheora-${THEORA_VERSION}.tar.gz && \
|
||||||
|
echo ${THEORA_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f libtheora-${THEORA_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libvpx https://www.webmproject.org/code/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/vpx && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://codeload.github.com/webmproject/libvpx/tar.gz/v${VPX_VERSION} | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-vp8 --enable-vp9 --enable-vp9-highbitdepth --enable-pic --enable-shared \
|
||||||
|
--disable-debug --disable-examples --disable-docs --disable-install-bins && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libwebp https://developers.google.com/speed/webp/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/webp && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-${WEBP_VERSION}.tar.gz | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### libmp3lame http://lame.sourceforge.net/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/lame && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://versaweb.dl.sourceforge.net/project/lame/lame/$(echo ${LAME_VERSION} | sed -e 's/[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)/\1.\2/')/lame-${LAME_VERSION}.tar.gz | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" --enable-shared --enable-nasm --disable-frontend && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### xvid https://www.xvid.com/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/xvid && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO http://downloads.xvid.org/downloads/xvidcore-${XVID_VERSION}.tar.gz && \
|
||||||
|
echo ${XVID_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx -f xvidcore-${XVID_VERSION}.tar.gz && \
|
||||||
|
cd xvidcore/build/generic && \
|
||||||
|
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
### fdk-aac https://github.com/mstorsjo/fdk-aac
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/fdk-aac && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
autoreconf -fiv && \
|
||||||
|
./configure --prefix="${PREFIX}" --enable-shared --datadir="${DIR}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## openjpeg https://github.com/uclouvain/openjpeg
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/openjpeg && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sL https://github.com/uclouvain/openjpeg/archive/v${OPENJPEG_VERSION}.tar.gz | \
|
||||||
|
tar -zx --strip-components=1 && \
|
||||||
|
cmake -DBUILD_THIRDPARTY:BOOL=ON -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## freetype https://www.freetype.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/freetype && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://download.savannah.gnu.org/releases/freetype/freetype-${FREETYPE_VERSION}.tar.gz && \
|
||||||
|
echo ${FREETYPE_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f freetype-${FREETYPE_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## libvidstab https://github.com/georgmartius/vid.stab
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/vid.stab && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/georgmartius/vid.stab/archive/v${LIBVIDSTAB_VERSION}.tar.gz && \
|
||||||
|
echo ${LIBVIDSTAB_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f v${LIBVIDSTAB_VERSION}.tar.gz && \
|
||||||
|
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## fribidi https://www.fribidi.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/fribidi && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
|
||||||
|
echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz && \
|
||||||
|
sed -i 's/^SUBDIRS =.*/SUBDIRS=gen.tab charset lib bin/' Makefile.am && \
|
||||||
|
./bootstrap --no-config --auto && \
|
||||||
|
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
||||||
|
make -j1 && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
## kvazaar https://github.com/ultravideo/kvazaar
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/kvazaar && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/aom && \
|
||||||
|
git clone --branch ${AOM_VERSION} --depth 1 https://aomedia.googlesource.com/aom ${DIR} ; \
|
||||||
|
cd ${DIR} ; \
|
||||||
|
rm -rf CMakeCache.txt CMakeFiles ; \
|
||||||
|
mkdir -p ./aom_build ; \
|
||||||
|
cd ./aom_build ; \
|
||||||
|
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=1 ..; \
|
||||||
|
make ; \
|
||||||
|
make install ; \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libxcb (and supporting libraries) for screen capture https://xcb.freedesktop.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/xorg-macros && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://www.x.org/archive//individual/util/util-macros-${XORG_MACROS_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f util-macros-${XORG_MACROS_VERSION}.tar.gz && \
|
||||||
|
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/xproto && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://www.x.org/archive/individual/proto/xproto-${XPROTO_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f xproto-${XPROTO_VERSION}.tar.gz && \
|
||||||
|
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libXau && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://www.x.org/archive/individual/lib/libXau-${XAU_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f libXau-${XAU_VERSION}.tar.gz && \
|
||||||
|
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libpthread-stubs && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://xcb.freedesktop.org/dist/libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libxcb-proto && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://xcb.freedesktop.org/dist/xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
|
||||||
|
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libxcb && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://xcb.freedesktop.org/dist/libxcb-${LIBXCB_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f libxcb-${LIBXCB_VERSION}.tar.gz && \
|
||||||
|
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libzmq https://github.com/zeromq/libzmq/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libzmq && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
|
||||||
|
echo ${LIBZMQ_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make check && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libsrt https://github.com/Haivision/srt
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/srt && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/Haivision/srt/archive/v${LIBSRT_VERSION}.tar.gz && \
|
||||||
|
tar -xz --strip-components=1 -f v${LIBSRT_VERSION}.tar.gz && \
|
||||||
|
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libpng
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/png && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
git clone https://git.code.sf.net/p/libpng/code ${DIR} -b v${LIBPNG_VERSION} --depth 1 && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make check && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libaribb24
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/b24 && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/nkoriyama/aribb24/archive/v${LIBARIBB24_VERSION}.tar.gz && \
|
||||||
|
echo ${LIBARIBB24_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -xz --strip-components=1 -f v${LIBARIBB24_VERSION}.tar.gz && \
|
||||||
|
autoreconf -fiv && \
|
||||||
|
./configure CFLAGS="-I${PREFIX}/include -fPIC" --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## ffmpeg https://ffmpeg.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
|
||||||
|
curl -sLO https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2 && \
|
||||||
|
tar -jx --strip-components=1 -f ffmpeg-${FFMPEG_VERSION}.tar.bz2
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
|
||||||
|
./configure \
|
||||||
|
--disable-debug \
|
||||||
|
--disable-doc \
|
||||||
|
--disable-ffplay \
|
||||||
|
--enable-shared \
|
||||||
|
--enable-avresample \
|
||||||
|
--enable-libopencore-amrnb \
|
||||||
|
--enable-libopencore-amrwb \
|
||||||
|
--enable-gpl \
|
||||||
|
--enable-libfreetype \
|
||||||
|
--enable-libvidstab \
|
||||||
|
--enable-libmp3lame \
|
||||||
|
--enable-libopus \
|
||||||
|
--enable-libtheora \
|
||||||
|
--enable-libvorbis \
|
||||||
|
--enable-libvpx \
|
||||||
|
--enable-libwebp \
|
||||||
|
--enable-libxcb \
|
||||||
|
--enable-libx265 \
|
||||||
|
--enable-libxvid \
|
||||||
|
--enable-libx264 \
|
||||||
|
--enable-nonfree \
|
||||||
|
--enable-openssl \
|
||||||
|
--enable-libfdk_aac \
|
||||||
|
--enable-postproc \
|
||||||
|
--enable-small \
|
||||||
|
--enable-version3 \
|
||||||
|
--enable-libzmq \
|
||||||
|
--extra-libs=-ldl \
|
||||||
|
--prefix="${PREFIX}" \
|
||||||
|
--enable-libopenjpeg \
|
||||||
|
--enable-libkvazaar \
|
||||||
|
--enable-libaom \
|
||||||
|
--extra-libs=-lpthread \
|
||||||
|
--enable-libsrt \
|
||||||
|
--enable-libaribb24 \
|
||||||
|
--enable-nvenc \
|
||||||
|
--enable-cuda \
|
||||||
|
--enable-cuvid \
|
||||||
|
--enable-libnpp \
|
||||||
|
--extra-cflags="-I${PREFIX}/include -I${PREFIX}/include/ffnvcodec -I/usr/local/cuda/include/" \
|
||||||
|
--extra-ldflags="-L${PREFIX}/lib -L/usr/local/cuda/lib64 -L/usr/local/cuda/lib32/" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
make tools/zmqsend && cp tools/zmqsend ${PREFIX}/bin/ && \
|
||||||
|
make distclean && \
|
||||||
|
hash -r && \
|
||||||
|
cd tools && \
|
||||||
|
make qt-faststart && cp qt-faststart ${PREFIX}/bin/
|
||||||
|
|
||||||
|
## cleanup
|
||||||
|
RUN \
|
||||||
|
LD_LIBRARY_PATH="${PREFIX}/lib:${PREFIX}/lib64:${LD_LIBRARY_PATH}" ldd ${PREFIX}/bin/ffmpeg | grep opt/ffmpeg | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
|
||||||
|
for lib in /usr/local/lib/*.so.*; do ln -s "${lib##*/}" "${lib%%.so.*}".so; done && \
|
||||||
|
cp ${PREFIX}/bin/* /usr/local/bin/ && \
|
||||||
|
cp -r ${PREFIX}/share/* /usr/local/share/ && \
|
||||||
|
LD_LIBRARY_PATH=/usr/local/lib ffmpeg -buildconf && \
|
||||||
|
cp -r ${PREFIX}/include/libav* ${PREFIX}/include/libpostproc ${PREFIX}/include/libsw* /usr/local/include && \
|
||||||
|
mkdir -p /usr/local/lib/pkgconfig && \
|
||||||
|
for pc in ${PREFIX}/lib/pkgconfig/libav*.pc ${PREFIX}/lib/pkgconfig/libpostproc.pc ${PREFIX}/lib/pkgconfig/libsw*.pc; do \
|
||||||
|
sed "s:${PREFIX}:/usr/local:g; s:/lib64:/lib:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
FROM runtime-base AS release
|
||||||
|
|
||||||
|
ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64
|
||||||
|
|
||||||
|
CMD ["--help"]
|
||||||
|
ENTRYPOINT ["ffmpeg"]
|
||||||
|
|
||||||
|
# copy only needed files, without copying nvidia dev files
|
||||||
|
COPY --from=build /usr/local/bin /usr/local/bin/
|
||||||
|
COPY --from=build /usr/local/share /usr/local/share/
|
||||||
|
COPY --from=build /usr/local/lib /usr/local/lib/
|
||||||
|
COPY --from=build /usr/local/include /usr/local/include/
|
||||||
|
|
||||||
|
# Let's make sure the app built correctly
|
||||||
|
# Convenient to verify on https://hub.docker.com/r/jrottenberg/ffmpeg/builds/ console output
|
||||||
@@ -18,12 +18,10 @@ FROM base as build
|
|||||||
ENV FFMPEG_VERSION=4.3.1 \
|
ENV FFMPEG_VERSION=4.3.1 \
|
||||||
AOM_VERSION=v1.0.0 \
|
AOM_VERSION=v1.0.0 \
|
||||||
FDKAAC_VERSION=0.1.5 \
|
FDKAAC_VERSION=0.1.5 \
|
||||||
FONTCONFIG_VERSION=2.12.4 \
|
|
||||||
FREETYPE_VERSION=2.5.5 \
|
FREETYPE_VERSION=2.5.5 \
|
||||||
FRIBIDI_VERSION=0.19.7 \
|
FRIBIDI_VERSION=0.19.7 \
|
||||||
KVAZAAR_VERSION=1.2.0 \
|
KVAZAAR_VERSION=1.2.0 \
|
||||||
LAME_VERSION=3.100 \
|
LAME_VERSION=3.100 \
|
||||||
LIBASS_VERSION=0.13.7 \
|
|
||||||
LIBPTHREAD_STUBS_VERSION=0.4 \
|
LIBPTHREAD_STUBS_VERSION=0.4 \
|
||||||
LIBVIDSTAB_VERSION=1.1.0 \
|
LIBVIDSTAB_VERSION=1.1.0 \
|
||||||
LIBXCB_VERSION=1.13.1 \
|
LIBXCB_VERSION=1.13.1 \
|
||||||
@@ -42,22 +40,17 @@ ENV FFMPEG_VERSION=4.3.1 \
|
|||||||
XORG_MACROS_VERSION=1.19.2 \
|
XORG_MACROS_VERSION=1.19.2 \
|
||||||
XPROTO_VERSION=7.0.31 \
|
XPROTO_VERSION=7.0.31 \
|
||||||
XVID_VERSION=1.3.4 \
|
XVID_VERSION=1.3.4 \
|
||||||
LIBXML2_VERSION=2.9.10 \
|
|
||||||
LIBBLURAY_VERSION=1.1.2 \
|
|
||||||
LIBZMQ_VERSION=4.3.3 \
|
LIBZMQ_VERSION=4.3.3 \
|
||||||
SRC=/usr/local
|
SRC=/usr/local
|
||||||
|
|
||||||
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
|
||||||
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
|
||||||
ARG LIBASS_SHA256SUM="8fadf294bf701300d4605e6f1d92929304187fca4b8d8a47889315526adbafd7 0.13.7.tar.gz"
|
|
||||||
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
|
||||||
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
|
||||||
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
|
||||||
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
|
||||||
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
|
||||||
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
|
||||||
ARG LIBXML2_SHA256SUM="f07dab13bf42d2b8db80620cce7419b3b87827cc937c8bb20fe13b8571ee9501 libxml2-v2.9.10.tar.gz"
|
|
||||||
ARG LIBBLURAY_SHA256SUM="a3dd452239b100dc9da0d01b30e1692693e2a332a7d29917bf84bb10ea7c0b42 libbluray-1.1.2.tar.bz2"
|
|
||||||
|
|
||||||
|
|
||||||
ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
|
ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
|
||||||
@@ -289,30 +282,7 @@ RUN \
|
|||||||
make -j1 && \
|
make -j1 && \
|
||||||
make -j $(nproc) install && \
|
make -j $(nproc) install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
## fontconfig https://www.freedesktop.org/wiki/Software/fontconfig/
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/fontconfig && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://www.freedesktop.org/software/fontconfig/release/fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
tar -jx --strip-components=1 -f fontconfig-${FONTCONFIG_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## libass https://github.com/libass/libass
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libass && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://github.com/libass/libass/archive/${LIBASS_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBASS_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -zx --strip-components=1 -f ${LIBASS_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
## kvazaar https://github.com/ultravideo/kvazaar
|
## kvazaar https://github.com/ultravideo/kvazaar
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/kvazaar && \
|
DIR=/tmp/kvazaar && \
|
||||||
@@ -409,32 +379,6 @@ RUN \
|
|||||||
make -j $(nproc) install && \
|
make -j $(nproc) install && \
|
||||||
rm -rf ${DIR}
|
rm -rf ${DIR}
|
||||||
|
|
||||||
## libxml2 - for libbluray
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libxml2 && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://gitlab.gnome.org/GNOME/libxml2/-/archive/v${LIBXML2_VERSION}/libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
echo ${LIBXML2_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -xz --strip-components=1 -f libxml2-v${LIBXML2_VERSION}.tar.gz && \
|
|
||||||
./autogen.sh --prefix="${PREFIX}" --with-ftp=no --with-http=no --with-python=no && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libbluray - Requires libxml, freetype, and fontconfig
|
|
||||||
RUN \
|
|
||||||
DIR=/tmp/libbluray && \
|
|
||||||
mkdir -p ${DIR} && \
|
|
||||||
cd ${DIR} && \
|
|
||||||
curl -sLO https://download.videolan.org/pub/videolan/libbluray/${LIBBLURAY_VERSION}/libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
echo ${LIBBLURAY_SHA256SUM} | sha256sum --check && \
|
|
||||||
tar -jx --strip-components=1 -f libbluray-${LIBBLURAY_VERSION}.tar.bz2 && \
|
|
||||||
./configure --prefix="${PREFIX}" --disable-examples --disable-bdjava-jar --disable-static --enable-shared && \
|
|
||||||
make -j $(nproc) && \
|
|
||||||
make -j $(nproc) install && \
|
|
||||||
rm -rf ${DIR}
|
|
||||||
|
|
||||||
## libzmq https://github.com/zeromq/libzmq/
|
## libzmq https://github.com/zeromq/libzmq/
|
||||||
RUN \
|
RUN \
|
||||||
DIR=/tmp/libzmq && \
|
DIR=/tmp/libzmq && \
|
||||||
@@ -475,8 +419,6 @@ RUN \
|
|||||||
--enable-libopencore-amrnb \
|
--enable-libopencore-amrnb \
|
||||||
--enable-libopencore-amrwb \
|
--enable-libopencore-amrwb \
|
||||||
--enable-gpl \
|
--enable-gpl \
|
||||||
--enable-libass \
|
|
||||||
--enable-fontconfig \
|
|
||||||
--enable-libfreetype \
|
--enable-libfreetype \
|
||||||
--enable-libvidstab \
|
--enable-libvidstab \
|
||||||
--enable-libmp3lame \
|
--enable-libmp3lame \
|
||||||
@@ -495,7 +437,6 @@ RUN \
|
|||||||
--enable-postproc \
|
--enable-postproc \
|
||||||
--enable-small \
|
--enable-small \
|
||||||
--enable-version3 \
|
--enable-version3 \
|
||||||
--enable-libbluray \
|
|
||||||
--enable-libzmq \
|
--enable-libzmq \
|
||||||
--extra-libs=-ldl \
|
--extra-libs=-ldl \
|
||||||
--prefix="${PREFIX}" \
|
--prefix="${PREFIX}" \
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
# Hardware Acceleration for Decoding Video
|
|
||||||
|
|
||||||
FFmpeg is compiled to support hardware accelerated decoding of video streams.
|
|
||||||
|
|
||||||
## Intel-based CPUs via Quicksync (https://trac.ffmpeg.org/wiki/Hardware/QuickSync)
|
|
||||||
```yaml
|
|
||||||
ffmpeg:
|
|
||||||
hwaccel_args:
|
|
||||||
- -hwaccel
|
|
||||||
- vaapi
|
|
||||||
- -hwaccel_device
|
|
||||||
- /dev/dri/renderD128
|
|
||||||
- -hwaccel_output_format
|
|
||||||
- yuv420p
|
|
||||||
```
|
|
||||||
|
|
||||||
## Raspberry Pi 3b and 4 (32bit OS)
|
|
||||||
Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Advanced Options > Memory Split)
|
|
||||||
```yaml
|
|
||||||
ffmpeg:
|
|
||||||
hwaccel_args:
|
|
||||||
- -c:v
|
|
||||||
- h264_mmal
|
|
||||||
```
|
|
||||||
|
|
||||||
## Raspberry Pi 4 (64bit OS)
|
|
||||||
```yaml
|
|
||||||
ffmpeg:
|
|
||||||
hwaccel_args:
|
|
||||||
- -c:v
|
|
||||||
- h264_v4l2m2m
|
|
||||||
```
|
|
||||||
@@ -16,8 +16,6 @@ input_args:
|
|||||||
- experimental
|
- experimental
|
||||||
- -fflags
|
- -fflags
|
||||||
- +genpts+discardcorrupt
|
- +genpts+discardcorrupt
|
||||||
- -vsync
|
|
||||||
- drop
|
|
||||||
- -use_wallclock_as_timestamps
|
- -use_wallclock_as_timestamps
|
||||||
- '1'
|
- '1'
|
||||||
```
|
```
|
||||||
|
BIN
docs/example-mask-poly.png
Normal file
|
After Width: | Height: | Size: 2.1 MiB |
BIN
docs/frigate.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
10
docs/how-frigate-works.md
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
# How Frigate Works
|
||||||
|
Frigate is designed to minimize resource usage and maximize performance by only looking for objects when and where it is necessary.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## 1. Look for Motion
|
||||||
|
|
||||||
|
## 2. Calculate Detection Regions
|
||||||
|
|
||||||
|
## 3. Run Object Detection
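
The three numbered steps above can be sketched in code. The following is only an illustrative sketch, not Frigate's actual implementation: `detect_objects`, the MOG2 background subtractor, and the contour-area threshold are placeholders standing in for Frigate's real motion detection and EdgeTPU object detection.

```python
import cv2
import numpy as np


def detect_objects(region: np.ndarray) -> list:
    """Placeholder for the real object detector (an SSD model on a Coral in Frigate)."""
    return []


def process_frame(frame: np.ndarray, bg_subtractor) -> list:
    # 1. Look for motion: cheap background subtraction over the whole frame.
    motion_mask = bg_subtractor.apply(frame)
    contours, _ = cv2.findContours(motion_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for contour in contours:
        if cv2.contourArea(contour) < 500:  # ignore tiny changes (noise, shadows)
            continue
        # 2. Calculate a detection region around the area with motion.
        x, y, w, h = cv2.boundingRect(contour)
        region = frame[y:y + h, x:x + w]
        # 3. Run (expensive) object detection only on that region.
        detections.extend(detect_objects(region))
    return detections


if __name__ == "__main__":
    subtractor = cv2.createBackgroundSubtractorMOG2()
    frame = np.zeros((300, 300, 3), dtype=np.uint8)  # stand-in for a decoded camera frame
    print(process_frame(frame, subtractor))
```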
|
||||||
BIN
docs/media_browser.png
Normal file
|
After Width: | Height: | Size: 781 KiB |
52
docs/notification-examples.md
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Notification examples
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
automation:
|
||||||
|
|
||||||
|
- alias: When a person enters a zone named yard
|
||||||
|
trigger:
|
||||||
|
platform: mqtt
|
||||||
|
topic: frigate/events
|
||||||
|
conditions:
|
||||||
|
- "{{ trigger.payload_json["after"]["label"] == 'person' }}"
|
||||||
|
- "{{ 'yard' in trigger.payload_json["after"]["entered_zones"] }}"
|
||||||
|
action:
|
||||||
|
- service: notify.mobile_app_pixel_3
|
||||||
|
data_template:
|
||||||
|
message: 'A {{trigger.payload_json["after"]["label"]}} has entered the yard.'
|
||||||
|
data:
|
||||||
|
image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg'
|
||||||
|
tag: '{{trigger.payload_json["after"]["id"]}}'
|
||||||
|
|
||||||
|
- alias: When a person leaves a zone named yard
|
||||||
|
trigger:
|
||||||
|
platform: mqtt
|
||||||
|
topic: frigate/events
|
||||||
|
conditions:
|
||||||
|
- "{{ trigger.payload_json["after"]["label"] == 'person' }}"
|
||||||
|
- "{{ 'yard' in trigger.payload_json["before"]["current_zones"] }}"
|
||||||
|
- "{{ not 'yard' in trigger.payload_json["after"]["current_zones"] }}"
|
||||||
|
action:
|
||||||
|
- service: notify.mobile_app_pixel_3
|
||||||
|
data_template:
|
||||||
|
message: 'A {{trigger.payload_json["after"]["label"]}} has left the yard.'
|
||||||
|
data:
|
||||||
|
image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg'
|
||||||
|
tag: '{{trigger.payload_json["after"]["id"]}}'
|
||||||
|
|
||||||
|
- alias: Notify for dogs in the front with a high top score
|
||||||
|
trigger:
|
||||||
|
platform: mqtt
|
||||||
|
topic: frigate/events
|
||||||
|
conditions:
|
||||||
|
- "{{ trigger.payload_json["after"]["label"] == 'dog' }}"
|
||||||
|
- "{{ trigger.payload_json["after"]["camera"] == 'front' }}"
|
||||||
|
- "{{ trigger.payload_json["after"]["top_score"] > 0.98 }}"
|
||||||
|
action:
|
||||||
|
- service: notify.mobile_app_pixel_3
|
||||||
|
data_template:
|
||||||
|
message: 'High confidence dog detection.'
|
||||||
|
data:
|
||||||
|
image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg'
|
||||||
|
tag: '{{trigger.payload_json["after"]["id"]}}'
|
||||||
|
```
|
||||||
BIN
docs/notification.png
Normal file
|
After Width: | Height: | Size: 1.5 MiB |
105
docs/nvdec.md
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
# nVidia hardware decoder (NVDEC)
|
||||||
|
|
||||||
|
Certain nvidia cards include a hardware decoder, which can greatly improve the
|
||||||
|
performance of video decoding. In order to use NVDEC, a special build of
|
||||||
|
ffmpeg with NVDEC support is required. The special docker architecture 'amd64nvidia'
|
||||||
|
includes this support for amd64 platforms. An aarch64 build for the Jetson, which
|
||||||
|
also includes NVDEC, may be added in the future.
|
||||||
|
|
||||||
|
## Docker setup
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
The [nVidia closed source driver](https://www.nvidia.com/en-us/drivers/unix/) is required to access NVDEC.
|
||||||
|
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker) is required to pass NVDEC through to docker.
|
||||||
|
|
||||||
|
### Setting up docker-compose
|
||||||
|
|
||||||
|
In order to pass NVDEC, the docker engine must be set to `nvidia` and the environment variables
|
||||||
|
`NVIDIA_VISIBLE_DEVICES=all` and `NVIDIA_DRIVER_CAPABILITIES=compute,utility,video` must be set.
|
||||||
|
|
||||||
|
In a docker compose file, these lines need to be set:
|
||||||
|
```
|
||||||
|
services:
|
||||||
|
frigate:
|
||||||
|
...
|
||||||
|
image: blakeblackshear/frigate:stable-amd64nvidia
|
||||||
|
runtime: nvidia
|
||||||
|
environment:
|
||||||
|
- NVIDIA_VISIBLE_DEVICES=all
|
||||||
|
- NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setting up the configuration file
|
||||||
|
|
||||||
|
In your frigate config.yml, you'll need to set ffmpeg to use the hardware decoder.
|
||||||
|
The decoder you choose will depend on the input video.
|
||||||
|
|
||||||
|
A list of supported decoders (you can run `ffmpeg -decoders | grep cuvid` in the container to get this list):
|
||||||
|
```
|
||||||
|
V..... h263_cuvid Nvidia CUVID H263 decoder (codec h263)
|
||||||
|
V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264)
|
||||||
|
V..... hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc)
|
||||||
|
V..... mjpeg_cuvid Nvidia CUVID MJPEG decoder (codec mjpeg)
|
||||||
|
V..... mpeg1_cuvid Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)
|
||||||
|
V..... mpeg2_cuvid Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)
|
||||||
|
V..... mpeg4_cuvid Nvidia CUVID MPEG4 decoder (codec mpeg4)
|
||||||
|
V..... vc1_cuvid Nvidia CUVID VC1 decoder (codec vc1)
|
||||||
|
V..... vp8_cuvid Nvidia CUVID VP8 decoder (codec vp8)
|
||||||
|
V..... vp9_cuvid Nvidia CUVID VP9 decoder (codec vp9)
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, for H265 video (hevc), you'll select `hevc_cuvid`. Add
|
||||||
|
`-c:v hevc_cuvid` to your ffmpeg input arguments:
|
||||||
|
|
||||||
|
```
|
||||||
|
ffmpeg:
|
||||||
|
input_args:
|
||||||
|
...
|
||||||
|
- -c:v
|
||||||
|
- hevc_cuvid
|
||||||
|
```
|
||||||
|
|
||||||
|
If everything is working correctly, you should see a significant improvement in performance.
|
||||||
|
Verify that hardware decoding is working by running `nvidia-smi`, which should show the ffmpeg
|
||||||
|
processes:
|
||||||
|
|
||||||
|
```
|
||||||
|
+-----------------------------------------------------------------------------+
|
||||||
|
| NVIDIA-SMI 455.38 Driver Version: 455.38 CUDA Version: 11.1 |
|
||||||
|
|-------------------------------+----------------------+----------------------+
|
||||||
|
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
|
||||||
|
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|
||||||
|
| | | MIG M. |
|
||||||
|
|===============================+======================+======================|
|
||||||
|
| 0 GeForce GTX 166... Off | 00000000:03:00.0 Off | N/A |
|
||||||
|
| 38% 41C P2 36W / 125W | 2082MiB / 5942MiB | 5% Default |
|
||||||
|
| | | N/A |
|
||||||
|
+-------------------------------+----------------------+----------------------+
|
||||||
|
|
||||||
|
+-----------------------------------------------------------------------------+
|
||||||
|
| Processes: |
|
||||||
|
| GPU GI CI PID Type Process name GPU Memory |
|
||||||
|
| ID ID Usage |
|
||||||
|
|=============================================================================|
|
||||||
|
| 0 N/A N/A 12737 C ffmpeg 249MiB |
|
||||||
|
| 0 N/A N/A 12751 C ffmpeg 249MiB |
|
||||||
|
| 0 N/A N/A 12772 C ffmpeg 249MiB |
|
||||||
|
| 0 N/A N/A 12775 C ffmpeg 249MiB |
|
||||||
|
| 0 N/A N/A 12800 C ffmpeg 249MiB |
|
||||||
|
| 0 N/A N/A 12811 C ffmpeg 417MiB |
|
||||||
|
| 0 N/A N/A 12827 C ffmpeg 417MiB |
|
||||||
|
+-----------------------------------------------------------------------------+
|
||||||
|
```
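
If you want to script that check, a small helper along these lines can be used (this is not part of Frigate; it assumes `nvidia-smi` is available on the PATH wherever it runs):

```python
# Hypothetical helper: confirm that ffmpeg processes are using the GPU.
import subprocess


def ffmpeg_uses_gpu() -> bool:
    # prints one process name per line for every app currently using the GPU
    result = subprocess.run(
        ["nvidia-smi", "--query-compute-apps=process_name", "--format=csv,noheader"],
        capture_output=True, text=True, check=True,
    )
    return any("ffmpeg" in line for line in result.stdout.splitlines())


if __name__ == "__main__":
    print("ffmpeg is on the GPU" if ffmpeg_uses_gpu() else "no ffmpeg processes found on the GPU")
```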
|
||||||
|
|
||||||
|
To further improve performance, you can set ffmpeg to skip frames in the output,
|
||||||
|
using the fps filter:
|
||||||
|
|
||||||
|
```
|
||||||
|
output_args:
|
||||||
|
- -filter:v
|
||||||
|
- fps=fps=5
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, this setting allows Frigate to consume my 10-15 fps camera streams on
|
||||||
|
a relatively low-powered Haswell machine with low CPU usage.
|
||||||
|
|
||||||
15
frigate/__main__.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# dump a traceback for every thread if the process crashes in native code (cv2, the TPU runtime, ...)
import faulthandler; faulthandler.enable()
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
|
||||||
|
threading.current_thread().name = "frigate"
|
||||||
|
|
||||||
|
from frigate.app import FrigateApp
|
||||||
|
|
||||||
|
cli = sys.modules['flask.cli']
|
||||||
|
cli.show_server_banner = lambda *x: None
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
frigate_app = FrigateApp()
|
||||||
|
|
||||||
|
frigate_app.start()
|
||||||
207
frigate/app.py
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import multiprocessing as mp
|
||||||
|
import os
|
||||||
|
from logging.handlers import QueueHandler
|
||||||
|
from typing import Dict, List
|
||||||
|
import sys
|
||||||
|
import signal
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from playhouse.sqlite_ext import SqliteExtDatabase
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
|
||||||
|
from frigate.edgetpu import EdgeTPUProcess
|
||||||
|
from frigate.events import EventProcessor, EventCleanup
|
||||||
|
from frigate.http import create_app
|
||||||
|
from frigate.log import log_process, root_configurer
|
||||||
|
from frigate.models import Event
|
||||||
|
from frigate.mqtt import create_mqtt_client
|
||||||
|
from frigate.object_processing import TrackedObjectProcessor
|
||||||
|
from frigate.record import RecordingMaintainer
|
||||||
|
from frigate.video import capture_camera, track_camera
|
||||||
|
from frigate.watchdog import FrigateWatchdog
|
||||||
|
from frigate.zeroconf import broadcast_zeroconf
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class FrigateApp():
|
||||||
|
def __init__(self):
|
||||||
|
self.stop_event = mp.Event()
|
||||||
|
self.config: FrigateConfig = None
|
||||||
|
self.detection_queue = mp.Queue()
|
||||||
|
self.detectors: Dict[str, EdgeTPUProcess] = {}
|
||||||
|
self.detection_out_events: Dict[str, mp.Event] = {}
|
||||||
|
self.detection_shms: List[mp.shared_memory.SharedMemory] = []
|
||||||
|
self.log_queue = mp.Queue()
|
||||||
|
self.camera_metrics = {}
|
||||||
|
|
||||||
|
def ensure_dirs(self):
|
||||||
|
for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
|
||||||
|
if not os.path.exists(d) and not os.path.islink(d):
|
||||||
|
logger.info(f"Creating directory: {d}")
|
||||||
|
os.makedirs(d)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Skipping directory: {d}")
|
||||||
|
|
||||||
|
def init_logger(self):
|
||||||
|
self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
|
||||||
|
self.log_process.daemon = True
|
||||||
|
self.log_process.start()
|
||||||
|
root_configurer(self.log_queue)
|
||||||
|
|
||||||
|
def init_config(self):
|
||||||
|
config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
|
||||||
|
self.config = FrigateConfig(config_file=config_file)
|
||||||
|
|
||||||
|
for camera_name in self.config.cameras.keys():
|
||||||
|
# create camera_metrics
|
||||||
|
self.camera_metrics[camera_name] = {
|
||||||
|
'camera_fps': mp.Value('d', 0.0),
|
||||||
|
'skipped_fps': mp.Value('d', 0.0),
|
||||||
|
'process_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_frame': mp.Value('d', 0.0),
|
||||||
|
'read_start': mp.Value('d', 0.0),
|
||||||
|
'ffmpeg_pid': mp.Value('i', 0),
|
||||||
|
'frame_queue': mp.Queue(maxsize=2)
|
||||||
|
}
|
||||||
|
|
||||||
|
def set_log_levels(self):
|
||||||
|
logging.getLogger().setLevel(self.config.logger.default)
|
||||||
|
for log, level in self.config.logger.logs.items():
|
||||||
|
logging.getLogger(log).setLevel(level)
|
||||||
|
|
||||||
|
if not 'werkzeug' in self.config.logger.logs:
|
||||||
|
logging.getLogger('werkzeug').setLevel('ERROR')
|
||||||
|
|
||||||
|
def init_queues(self):
|
||||||
|
# Queues for clip processing
|
||||||
|
self.event_queue = mp.Queue()
|
||||||
|
self.event_processed_queue = mp.Queue()
|
||||||
|
|
||||||
|
# Queue for cameras to push tracked objects to
|
||||||
|
self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
|
||||||
|
|
||||||
|
def init_database(self):
|
||||||
|
self.db = SqliteExtDatabase(f"/{os.path.join(CLIPS_DIR, 'frigate.db')}")
|
||||||
|
models = [Event]
|
||||||
|
self.db.bind(models)
|
||||||
|
self.db.create_tables(models, safe=True)
|
||||||
|
|
||||||
|
def init_web_server(self):
|
||||||
|
self.flask_app = create_app(self.config, self.db, self.camera_metrics, self.detectors, self.detected_frames_processor)
|
||||||
|
|
||||||
|
def init_mqtt(self):
|
||||||
|
self.mqtt_client = create_mqtt_client(self.config.mqtt)
|
||||||
|
|
||||||
|
def start_detectors(self):
|
||||||
|
for name in self.config.cameras.keys():
|
||||||
|
self.detection_out_events[name] = mp.Event()
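# per-camera shared memory: the input block holds one 300x300x3 uint8 frame for the detector, the output block holds up to 20 detections of 6 float32 values each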
|
||||||
|
shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
|
||||||
|
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
|
||||||
|
self.detection_shms.append(shm_in)
|
||||||
|
self.detection_shms.append(shm_out)
|
||||||
|
|
||||||
|
for name, detector in self.config.detectors.items():
|
||||||
|
if detector.type == 'cpu':
|
||||||
|
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, out_events=self.detection_out_events, tf_device='cpu')
|
||||||
|
if detector.type == 'edgetpu':
|
||||||
|
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, out_events=self.detection_out_events, tf_device=detector.device)
|
||||||
|
|
||||||
|
def start_detected_frames_processor(self):
|
||||||
|
self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
|
||||||
|
self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
|
||||||
|
self.detected_frames_processor.start()
|
||||||
|
|
||||||
|
def start_camera_processors(self):
|
||||||
|
for name, config in self.config.cameras.items():
|
||||||
|
camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config,
|
||||||
|
self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
|
||||||
|
self.camera_metrics[name]))
|
||||||
|
camera_process.daemon = True
|
||||||
|
self.camera_metrics[name]['process'] = camera_process
|
||||||
|
camera_process.start()
|
||||||
|
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
|
||||||
|
|
||||||
|
def start_camera_capture_processes(self):
|
||||||
|
for name, config in self.config.cameras.items():
|
||||||
|
capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
|
||||||
|
self.camera_metrics[name]))
|
||||||
|
capture_process.daemon = True
|
||||||
|
self.camera_metrics[name]['capture_process'] = capture_process
|
||||||
|
capture_process.start()
|
||||||
|
logger.info(f"Capture process started for {name}: {capture_process.pid}")
|
||||||
|
|
||||||
|
def start_event_processor(self):
|
||||||
|
self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event)
|
||||||
|
self.event_processor.start()
|
||||||
|
|
||||||
|
def start_event_cleanup(self):
|
||||||
|
self.event_cleanup = EventCleanup(self.config, self.stop_event)
|
||||||
|
self.event_cleanup.start()
|
||||||
|
|
||||||
|
def start_recording_maintainer(self):
|
||||||
|
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
|
||||||
|
self.recording_maintainer.start()
|
||||||
|
|
||||||
|
def start_watchdog(self):
|
||||||
|
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
|
||||||
|
self.frigate_watchdog.start()
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
self.init_logger()
|
||||||
|
try:
|
||||||
|
self.ensure_dirs()
|
||||||
|
try:
|
||||||
|
self.init_config()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error parsing config: {e}")
|
||||||
|
self.log_process.terminate()
|
||||||
|
sys.exit(1)
|
||||||
|
self.set_log_levels()
|
||||||
|
self.init_queues()
|
||||||
|
self.init_database()
|
||||||
|
self.init_mqtt()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(e)
|
||||||
|
self.log_process.terminate()
|
||||||
|
sys.exit(1)
|
||||||
|
self.start_detectors()
|
||||||
|
self.start_detected_frames_processor()
|
||||||
|
self.start_camera_processors()
|
||||||
|
self.start_camera_capture_processes()
|
||||||
|
self.init_web_server()
|
||||||
|
self.start_event_processor()
|
||||||
|
self.start_event_cleanup()
|
||||||
|
self.start_recording_maintainer()
|
||||||
|
self.start_watchdog()
|
||||||
|
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
|
||||||
|
|
||||||
|
def receiveSignal(signalNumber, frame):
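# invoked on SIGTERM (registered below) so `docker stop` shuts Frigate down cleanly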
|
||||||
|
self.stop()
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
signal.signal(signal.SIGTERM, receiveSignal)
|
||||||
|
|
||||||
|
self.flask_app.run(host='127.0.0.1', port=5001, debug=False)
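# blocks here serving the API; when the web server exits, fall through to stop() below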
|
||||||
|
self.stop()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
logger.info(f"Stopping...")
|
||||||
|
self.stop_event.set()
|
||||||
|
|
||||||
|
self.detected_frames_processor.join()
|
||||||
|
self.event_processor.join()
|
||||||
|
self.event_cleanup.join()
|
||||||
|
self.recording_maintainer.join()
|
||||||
|
self.frigate_watchdog.join()
|
||||||
|
|
||||||
|
for detector in self.detectors.values():
|
||||||
|
detector.stop()
|
||||||
|
|
||||||
|
while len(self.detection_shms) > 0:
|
||||||
|
shm = self.detection_shms.pop()
|
||||||
|
shm.close()
|
||||||
|
shm.unlink()
|
||||||
806
frigate/config.py
Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from typing import Dict
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import numpy as np
|
||||||
|
import voluptuous as vol
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
|
||||||
|
|
||||||
|
DETECTORS_SCHEMA = vol.Schema(
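# each named detector is either type 'cpu' or 'edgetpu'; 'device' is the EdgeTPU device string (default 'usb')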
|
||||||
|
{
|
||||||
|
vol.Required(str): {
|
||||||
|
vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
|
||||||
|
vol.Optional('device', default='usb'): str
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
DEFAULT_DETECTORS = {
|
||||||
|
'coral': {
|
||||||
|
'type': 'edgetpu',
|
||||||
|
'device': 'usb'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
MQTT_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Required('host'): str,
|
||||||
|
vol.Optional('port', default=1883): int,
|
||||||
|
vol.Optional('topic_prefix', default='frigate'): str,
|
||||||
|
vol.Optional('client_id', default='frigate'): str,
|
||||||
|
'user': str,
|
||||||
|
'password': str
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
SAVE_CLIPS_RETAIN_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Required('default',default=10): int,
|
||||||
|
'objects': {
|
||||||
|
str: int
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
SAVE_CLIPS_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Optional('max_seconds', default=300): int,
|
||||||
|
vol.Optional('retain', default={}): SAVE_CLIPS_RETAIN_SCHEMA
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
FFMPEG_GLOBAL_ARGS_DEFAULT = ['-hide_banner','-loglevel','fatal']
|
||||||
|
FFMPEG_INPUT_ARGS_DEFAULT = ['-avoid_negative_ts', 'make_zero',
|
||||||
|
'-fflags', '+genpts+discardcorrupt',
|
||||||
|
'-rtsp_transport', 'tcp',
|
||||||
|
'-stimeout', '5000000',
|
||||||
|
'-use_wallclock_as_timestamps', '1']
|
||||||
|
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ['-f', 'rawvideo',
|
||||||
|
'-pix_fmt', 'yuv420p']
|
||||||
|
RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
|
||||||
|
SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
|
||||||
|
"10", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
|
||||||
|
"1", "-c", "copy", "-an"]
|
||||||
|
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
|
||||||
|
"60", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
|
||||||
|
"1", "-c", "copy", "-an"]
|
||||||
|
|
||||||
|
GLOBAL_FFMPEG_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]),
|
||||||
|
vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('output_args', default={}): {
|
||||||
|
vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
FILTER_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
str: {
|
||||||
|
vol.Optional('min_area', default=0): int,
|
||||||
|
vol.Optional('max_area', default=24000000): int,
|
||||||
|
vol.Optional('threshold', default=0.85): float
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
def filters_for_all_tracked_objects(object_config):
|
||||||
|
for tracked_object in object_config.get('track', ['person']):
|
||||||
|
if not 'filters' in object_config:
|
||||||
|
object_config['filters'] = {}
|
||||||
|
if not tracked_object in object_config['filters']:
|
||||||
|
object_config['filters'][tracked_object] = {}
|
||||||
|
return object_config
|
||||||
|
|
||||||
|
OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
|
||||||
|
{
|
||||||
|
vol.Optional('track', default=['person']): [str],
|
||||||
|
vol.Optional('filters', default = {}): FILTER_SCHEMA.extend({ str: {vol.Optional('min_score', default=0.5): float}})
|
||||||
|
}
|
||||||
|
))
|
||||||
|
|
||||||
|
DEFAULT_CAMERA_SAVE_CLIPS = {
|
||||||
|
'enabled': False
|
||||||
|
}
|
||||||
|
DEFAULT_CAMERA_SNAPSHOTS = {
|
||||||
|
'show_timestamp': True,
|
||||||
|
'draw_zones': False,
|
||||||
|
'draw_bounding_boxes': True,
|
||||||
|
'crop_to_region': True
|
||||||
|
}
|
||||||
|
|
||||||
|
def each_role_used_once(inputs):
|
||||||
|
roles = [role for i in inputs for role in i['roles']]
|
||||||
|
roles_set = set(roles)
|
||||||
|
if len(roles) > len(roles_set):
|
||||||
|
raise ValueError
|
||||||
|
return inputs
|
||||||
|
|
||||||
|
CAMERA_FFMPEG_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Required('inputs'): vol.All([{
|
||||||
|
vol.Required('path'): str,
|
||||||
|
vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'],
|
||||||
|
'global_args': vol.Any(str, [str]),
|
||||||
|
'hwaccel_args': vol.Any(str, [str]),
|
||||||
|
'input_args': vol.Any(str, [str]),
|
||||||
|
}], vol.Msg(each_role_used_once, msg="Each input role may only be used once")),
|
||||||
|
'output_args': {
|
||||||
|
vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
def ensure_zones_and_cameras_have_different_names(cameras):
|
||||||
|
zones = [zone for camera in cameras.values() for zone in camera['zones'].keys()]
|
||||||
|
for zone in zones:
|
||||||
|
if zone in cameras.keys():
|
||||||
|
raise ValueError
|
||||||
|
return cameras
|
||||||
|
|
||||||
|
CAMERAS_SCHEMA = vol.Schema(vol.All(
|
||||||
|
{
|
||||||
|
str: {
|
||||||
|
vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA,
|
||||||
|
vol.Required('height'): int,
|
||||||
|
vol.Required('width'): int,
|
||||||
|
'fps': int,
|
||||||
|
'mask': str,
|
||||||
|
vol.Optional('best_image_timeout', default=60): int,
|
||||||
|
vol.Optional('zones', default={}): {
|
||||||
|
str: {
|
||||||
|
vol.Required('coordinates'): vol.Any(str, [str]),
|
||||||
|
vol.Optional('filters', default={}): FILTER_SCHEMA
|
||||||
|
}
|
||||||
|
},
|
||||||
|
vol.Optional('save_clips', default=DEFAULT_CAMERA_SAVE_CLIPS): {
|
||||||
|
vol.Optional('enabled', default=False): bool,
|
||||||
|
vol.Optional('pre_capture', default=30): int,
|
||||||
|
'objects': [str],
|
||||||
|
vol.Optional('retain', default={}): SAVE_CLIPS_RETAIN_SCHEMA,
|
||||||
|
},
|
||||||
|
vol.Optional('record', default={}): {
|
||||||
|
'enabled': bool,
|
||||||
|
'retain_days': int,
|
||||||
|
},
|
||||||
|
vol.Optional('rtmp', default={}): {
|
||||||
|
vol.Required('enabled', default=True): bool,
|
||||||
|
},
|
||||||
|
vol.Optional('snapshots', default=DEFAULT_CAMERA_SNAPSHOTS): {
|
||||||
|
vol.Optional('show_timestamp', default=True): bool,
|
||||||
|
vol.Optional('draw_zones', default=False): bool,
|
||||||
|
vol.Optional('draw_bounding_boxes', default=True): bool,
|
||||||
|
vol.Optional('crop_to_region', default=True): bool,
|
||||||
|
vol.Optional('height', default=175): int
|
||||||
|
},
|
||||||
|
'objects': OBJECTS_SCHEMA
|
||||||
|
}
|
||||||
|
}, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
|
||||||
|
)
|
||||||
|
|
||||||
|
FRIGATE_CONFIG_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Optional('detectors', default=DEFAULT_DETECTORS): DETECTORS_SCHEMA,
|
||||||
|
'mqtt': MQTT_SCHEMA,
|
||||||
|
vol.Optional('logger', default={'default': 'info', 'logs': {}}): {
|
||||||
|
vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']),
|
||||||
|
vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) }
|
||||||
|
},
|
||||||
|
vol.Optional('save_clips', default={}): SAVE_CLIPS_SCHEMA,
|
||||||
|
vol.Optional('record', default={}): {
|
||||||
|
vol.Optional('enabled', default=False): bool,
|
||||||
|
vol.Optional('retain_days', default=30): int,
|
||||||
|
},
|
||||||
|
vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,
|
||||||
|
vol.Optional('objects', default={}): OBJECTS_SCHEMA,
|
||||||
|
vol.Required('cameras', default={}): CAMERAS_SCHEMA
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
class DetectorConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._type = config['type']
|
||||||
|
self._device = config['device']
|
||||||
|
|
||||||
|
@property
|
||||||
|
def type(self):
|
||||||
|
return self._type
|
||||||
|
|
||||||
|
@property
|
||||||
|
def device(self):
|
||||||
|
return self._device
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'type': self.type,
|
||||||
|
'device': self.device
|
||||||
|
}
|
||||||
|
|
||||||
|
class LoggerConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._default = config['default'].upper()
|
||||||
|
self._logs = {k: v.upper() for k, v in config['logs'].items()}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def default(self):
|
||||||
|
return self._default
|
||||||
|
|
||||||
|
@property
|
||||||
|
def logs(self):
|
||||||
|
return self._logs
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'default': self.default,
|
||||||
|
'logs': self.logs
|
||||||
|
}
|
||||||
|
|
||||||
|
class MqttConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._host = config['host']
|
||||||
|
self._port = config['port']
|
||||||
|
self._topic_prefix = config['topic_prefix']
|
||||||
|
self._client_id = config['client_id']
|
||||||
|
self._user = config.get('user')
|
||||||
|
self._password = config.get('password')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def host(self):
|
||||||
|
return self._host
|
||||||
|
|
||||||
|
@property
|
||||||
|
def port(self):
|
||||||
|
return self._port
|
||||||
|
|
||||||
|
@property
|
||||||
|
def topic_prefix(self):
|
||||||
|
return self._topic_prefix
|
||||||
|
|
||||||
|
@property
|
||||||
|
def client_id(self):
|
||||||
|
return self._client_id
|
||||||
|
|
||||||
|
@property
|
||||||
|
def user(self):
|
||||||
|
return self._user
|
||||||
|
|
||||||
|
@property
|
||||||
|
def password(self):
|
||||||
|
return self._password
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'host': self.host,
|
||||||
|
'port': self.port,
|
||||||
|
'topic_prefix': self.topic_prefix,
|
||||||
|
'client_id': self.client_id,
|
||||||
|
'user': self.user
|
||||||
|
}
|
||||||
|
|
||||||
|
class CameraInput():
|
||||||
|
def __init__(self, global_config, ffmpeg_input):
|
||||||
|
self._path = ffmpeg_input['path']
|
||||||
|
self._roles = ffmpeg_input['roles']
|
||||||
|
self._global_args = ffmpeg_input.get('global_args', global_config['global_args'])
|
||||||
|
self._hwaccel_args = ffmpeg_input.get('hwaccel_args', global_config['hwaccel_args'])
|
||||||
|
self._input_args = ffmpeg_input.get('input_args', global_config['input_args'])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def path(self):
|
||||||
|
return self._path
|
||||||
|
|
||||||
|
@property
|
||||||
|
def roles(self):
|
||||||
|
return self._roles
|
||||||
|
|
||||||
|
@property
|
||||||
|
def global_args(self):
|
||||||
|
return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def hwaccel_args(self):
|
||||||
|
return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def input_args(self):
|
||||||
|
return self._input_args if isinstance(self._input_args, list) else self._input_args.split(' ')
|
||||||
|
|
||||||
|
class CameraFfmpegConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._inputs = [CameraInput(global_config, i) for i in config['inputs']]
|
||||||
|
self._output_args = config.get('output_args', global_config['output_args'])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def inputs(self):
|
||||||
|
return self._inputs
|
||||||
|
|
||||||
|
@property
|
||||||
|
def output_args(self):
|
||||||
|
return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()}
|
||||||
|
|
||||||
|
class SaveClipsRetainConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._default = config.get('default', global_config.get('default'))
|
||||||
|
self._objects = config.get('objects', global_config.get('objects', {}))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def default(self):
|
||||||
|
return self._default
|
||||||
|
|
||||||
|
@property
|
||||||
|
def objects(self):
|
||||||
|
return self._objects
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'default': self.default,
|
||||||
|
'objects': self.objects
|
||||||
|
}
|
||||||
|
|
||||||
|
class SaveClipsConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._max_seconds = config['max_seconds']
|
||||||
|
self._retain = SaveClipsRetainConfig(config['retain'], config['retain'])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def max_seconds(self):
|
||||||
|
return self._max_seconds
|
||||||
|
|
||||||
|
@property
|
||||||
|
def retain(self):
|
||||||
|
return self._retain
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'max_seconds': self.max_seconds,
|
||||||
|
'retain': self.retain.to_dict()
|
||||||
|
}
|
||||||
|
|
||||||
|
class RecordConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._enabled = config.get('enabled', global_config['enabled'])
|
||||||
|
self._retain_days = config.get('retain_days', global_config['retain_days'])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def enabled(self):
|
||||||
|
return self._enabled
|
||||||
|
|
||||||
|
@property
|
||||||
|
def retain_days(self):
|
||||||
|
return self._retain_days
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'enabled': self.enabled,
|
||||||
|
'retain_days': self.retain_days,
|
||||||
|
}
|
||||||
|
|
||||||
|
class FilterConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._min_area = config['min_area']
|
||||||
|
self._max_area = config['max_area']
|
||||||
|
self._threshold = config['threshold']
|
||||||
|
self._min_score = config.get('min_score')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def min_area(self):
|
||||||
|
return self._min_area
|
||||||
|
|
||||||
|
@property
|
||||||
|
def max_area(self):
|
||||||
|
return self._max_area
|
||||||
|
|
||||||
|
@property
|
||||||
|
def threshold(self):
|
||||||
|
return self._threshold
|
||||||
|
|
||||||
|
@property
|
||||||
|
def min_score(self):
|
||||||
|
return self._min_score
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'min_area': self.min_area,
|
||||||
|
'max_area': self.max_area,
|
||||||
|
'threshold': self.threshold,
|
||||||
|
'min_score': self.min_score
|
||||||
|
}
|
||||||
|
|
||||||
|
class ObjectConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._track = config.get('track', global_config['track'])
|
||||||
|
if 'filters' in config:
|
||||||
|
self._filters = { name: FilterConfig(c) for name, c in config['filters'].items() }
|
||||||
|
else:
|
||||||
|
self._filters = { name: FilterConfig(c) for name, c in global_config['filters'].items() }
|
||||||
|
|
||||||
|
@property
|
||||||
|
def track(self):
|
||||||
|
return self._track
|
||||||
|
|
||||||
|
@property
|
||||||
|
def filters(self) -> Dict[str, FilterConfig]:
|
||||||
|
return self._filters
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'track': self.track,
|
||||||
|
'filters': { k: f.to_dict() for k, f in self.filters.items() }
|
||||||
|
}
|
||||||
|
|
||||||
|
class CameraSnapshotsConfig():
|
||||||
|
def __init__(self, config):
|
||||||
|
self._show_timestamp = config['show_timestamp']
|
||||||
|
self._draw_zones = config['draw_zones']
|
||||||
|
self._draw_bounding_boxes = config['draw_bounding_boxes']
|
||||||
|
self._crop_to_region = config['crop_to_region']
|
||||||
|
self._height = config.get('height')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def show_timestamp(self):
|
||||||
|
return self._show_timestamp
|
||||||
|
|
||||||
|
@property
|
||||||
|
def draw_zones(self):
|
||||||
|
return self._draw_zones
|
||||||
|
|
||||||
|
@property
|
||||||
|
def draw_bounding_boxes(self):
|
||||||
|
return self._draw_bounding_boxes
|
||||||
|
|
||||||
|
@property
|
||||||
|
def crop_to_region(self):
|
||||||
|
return self._crop_to_region
|
||||||
|
|
||||||
|
@property
|
||||||
|
def height(self):
|
||||||
|
return self._height
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'show_timestamp': self.show_timestamp,
|
||||||
|
'draw_zones': self.draw_zones,
|
||||||
|
'draw_bounding_boxes': self.draw_bounding_boxes,
|
||||||
|
'crop_to_region': self.crop_to_region,
|
||||||
|
'height': self.height
|
||||||
|
}
|
||||||
|
|
||||||
|
class CameraSaveClipsConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._enabled = config['enabled']
|
||||||
|
self._pre_capture = config['pre_capture']
|
||||||
|
self._objects = config.get('objects', global_config['objects']['track'])
|
||||||
|
self._retain = SaveClipsRetainConfig(global_config['save_clips']['retain'], config['retain'])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def enabled(self):
|
||||||
|
return self._enabled
|
||||||
|
|
||||||
|
@property
|
||||||
|
def pre_capture(self):
|
||||||
|
return self._pre_capture
|
||||||
|
|
||||||
|
@property
|
||||||
|
def objects(self):
|
||||||
|
return self._objects
|
||||||
|
|
||||||
|
@property
|
||||||
|
def retain(self):
|
||||||
|
return self._retain
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'enabled': self.enabled,
|
||||||
|
'pre_capture': self.pre_capture,
|
||||||
|
'objects': self.objects,
|
||||||
|
'retain': self.retain.to_dict()
|
||||||
|
}
|
||||||
|
|
||||||
|
class CameraRtmpConfig():
|
||||||
|
def __init__(self, global_config, config):
|
||||||
|
self._enabled = config['enabled']
|
||||||
|
|
||||||
|
@property
|
||||||
|
def enabled(self):
|
||||||
|
return self._enabled
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'enabled': self.enabled,
|
||||||
|
}
|
||||||
|
|
||||||
|
class ZoneConfig():
|
||||||
|
def __init__(self, name, config):
|
||||||
|
self._coordinates = config['coordinates']
|
||||||
|
self._filters = { name: FilterConfig(c) for name, c in config['filters'].items() }
|
||||||
|
|
||||||
|
if isinstance(self._coordinates, list):
|
||||||
|
self._contour = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates])
|
||||||
|
elif isinstance(self._coordinates, str):
|
||||||
|
points = self._coordinates.split(',')
|
||||||
|
self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
|
||||||
|
else:
|
||||||
|
print(f"Unable to parse zone coordinates for {name}")
|
||||||
|
self._contour = np.array([])
|
||||||
|
|
||||||
|
self._color = (0,0,0)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def coordinates(self):
|
||||||
|
return self._coordinates
|
||||||
|
|
||||||
|
@property
|
||||||
|
def contour(self):
|
||||||
|
return self._contour
|
||||||
|
|
||||||
|
@contour.setter
|
||||||
|
def contour(self, val):
|
||||||
|
self._contour = val
|
||||||
|
|
||||||
|
@property
|
||||||
|
def color(self):
|
||||||
|
return self._color
|
||||||
|
|
||||||
|
@color.setter
|
||||||
|
def color(self, val):
|
||||||
|
self._color = val
|
||||||
|
|
||||||
|
@property
|
||||||
|
def filters(self):
|
||||||
|
return self._filters
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'filters': {k: f.to_dict() for k, f in self.filters.items()}
|
||||||
|
}
|
||||||
|
|
||||||
|
class CameraConfig():
|
||||||
|
def __init__(self, name, config, global_config):
|
||||||
|
self._name = name
|
||||||
|
self._ffmpeg = CameraFfmpegConfig(global_config['ffmpeg'], config['ffmpeg'])
|
||||||
|
self._height = config.get('height')
|
||||||
|
self._width = config.get('width')
|
||||||
|
self._frame_shape = (self._height, self._width)
|
||||||
|
self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])
|
||||||
|
self._fps = config.get('fps')
|
||||||
|
self._mask = self._create_mask(config.get('mask'))
|
||||||
|
self._best_image_timeout = config['best_image_timeout']
|
||||||
|
self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }
|
||||||
|
self._save_clips = CameraSaveClipsConfig(global_config, config['save_clips'])
|
||||||
|
self._record = RecordConfig(global_config['record'], config['record'])
|
||||||
|
self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
|
||||||
|
self._snapshots = CameraSnapshotsConfig(config['snapshots'])
|
||||||
|
self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}))
|
||||||
|
|
||||||
|
self._ffmpeg_cmds = []
|
||||||
|
for ffmpeg_input in self._ffmpeg.inputs:
|
||||||
|
self._ffmpeg_cmds.append({
|
||||||
|
'roles': ffmpeg_input.roles,
|
||||||
|
'cmd': self._get_ffmpeg_cmd(ffmpeg_input)
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
self._set_zone_colors(self._zones)
|
||||||
|
|
||||||
|
def _create_mask(self, mask):
|
||||||
|
if mask:
|
||||||
|
if mask.startswith('base64,'):
|
||||||
|
img = base64.b64decode(mask[7:])
|
||||||
|
np_img = np.fromstring(img, dtype=np.uint8)
|
||||||
|
mask_img = cv2.imdecode(np_img, cv2.IMREAD_GRAYSCALE)
|
||||||
|
elif mask.startswith('poly,'):
|
||||||
|
points = mask.split(',')[1:]
|
||||||
|
contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
|
||||||
|
mask_img = np.zeros(self.frame_shape, np.uint8)
|
||||||
|
mask_img[:] = 255
|
||||||
|
cv2.fillPoly(mask_img, pts=[contour], color=(0))
|
||||||
|
else:
|
||||||
|
mask_img = cv2.imread(f"/config/{mask}", cv2.IMREAD_GRAYSCALE)
|
||||||
|
else:
|
||||||
|
mask_img = None
|
||||||
|
|
||||||
|
if mask_img is None or mask_img.size == 0:
|
||||||
|
mask_img = np.zeros(self.frame_shape, np.uint8)
|
||||||
|
mask_img[:] = 255
|
||||||
|
|
||||||
|
return mask_img
|
||||||
|
|
||||||
|
def _get_ffmpeg_cmd(self, ffmpeg_input):
|
||||||
|
ffmpeg_output_args = []
|
||||||
|
if 'detect' in ffmpeg_input.roles:
|
||||||
|
ffmpeg_output_args = self.ffmpeg.output_args['detect'] + ffmpeg_output_args + ['pipe:']
|
||||||
|
if self.fps:
|
||||||
|
ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args
|
||||||
|
if 'rtmp' in ffmpeg_input.roles and self.rtmp.enabled:
|
||||||
|
ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [
|
||||||
|
f"rtmp://127.0.0.1/live/{self.name}"
|
||||||
|
] + ffmpeg_output_args
|
||||||
|
if 'clips' in ffmpeg_input.roles and self.save_clips.enabled:
|
||||||
|
ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [
|
||||||
|
f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
|
||||||
|
] + ffmpeg_output_args
|
||||||
|
if 'record' in ffmpeg_input.roles and self.record.enabled:
|
||||||
|
ffmpeg_output_args = self.ffmpeg.output_args['record'] + [
|
||||||
|
f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
|
||||||
|
] + ffmpeg_output_args
|
||||||
|
return (['ffmpeg'] +
|
||||||
|
ffmpeg_input.global_args +
|
||||||
|
ffmpeg_input.hwaccel_args +
|
||||||
|
ffmpeg_input.input_args +
|
||||||
|
['-i', ffmpeg_input.path] +
|
||||||
|
ffmpeg_output_args)
|
||||||
|
|
||||||
|
def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
|
||||||
|
# set colors for zones
|
||||||
|
all_zone_names = zones.keys()
|
||||||
|
zone_colors = {}
|
||||||
|
colors = plt.cm.get_cmap('tab10', len(all_zone_names))
|
||||||
|
for i, zone in enumerate(all_zone_names):
|
||||||
|
zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
|
||||||
|
|
||||||
|
for name, zone in zones.items():
|
||||||
|
zone.color = zone_colors[name]
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self):
|
||||||
|
return self._name
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ffmpeg(self):
|
||||||
|
return self._ffmpeg
|
||||||
|
|
||||||
|
@property
|
||||||
|
def height(self):
|
||||||
|
return self._height
|
||||||
|
|
||||||
|
@property
|
||||||
|
def width(self):
|
||||||
|
return self._width
|
||||||
|
|
||||||
|
@property
|
||||||
|
def fps(self):
|
||||||
|
return self._fps
|
||||||
|
|
||||||
|
@property
|
||||||
|
def mask(self):
|
||||||
|
return self._mask
|
||||||
|
|
||||||
|
@property
|
||||||
|
def best_image_timeout(self):
|
||||||
|
return self._best_image_timeout
|
||||||
|
|
||||||
|
@property
|
||||||
|
def zones(self)-> Dict[str, ZoneConfig]:
|
||||||
|
return self._zones
|
||||||
|
|
||||||
|
@property
|
||||||
|
def save_clips(self):
|
||||||
|
return self._save_clips
|
||||||
|
|
||||||
|
@property
|
||||||
|
def record(self):
|
||||||
|
return self._record
|
||||||
|
|
||||||
|
@property
|
||||||
|
def rtmp(self):
|
||||||
|
return self._rtmp
|
||||||
|
|
||||||
|
@property
|
||||||
|
def snapshots(self):
|
||||||
|
return self._snapshots
|
||||||
|
|
||||||
|
@property
|
||||||
|
def objects(self):
|
||||||
|
return self._objects
|
||||||
|
|
||||||
|
@property
|
||||||
|
def frame_shape(self):
|
||||||
|
return self._frame_shape
|
||||||
|
|
||||||
|
@property
|
||||||
|
def frame_shape_yuv(self):
|
||||||
|
return self._frame_shape_yuv
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ffmpeg_cmds(self):
|
||||||
|
return self._ffmpeg_cmds
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'name': self.name,
|
||||||
|
'height': self.height,
|
||||||
|
'width': self.width,
|
||||||
|
'fps': self.fps,
|
||||||
|
'best_image_timeout': self.best_image_timeout,
|
||||||
|
'zones': {k: z.to_dict() for k, z in self.zones.items()},
|
||||||
|
'save_clips': self.save_clips.to_dict(),
|
||||||
|
'record': self.record.to_dict(),
|
||||||
|
'rtmp': self.rtmp.to_dict(),
|
||||||
|
'snapshots': self.snapshots.to_dict(),
|
||||||
|
'objects': self.objects.to_dict(),
|
||||||
|
'frame_shape': self.frame_shape,
|
||||||
|
'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class FrigateConfig():
|
||||||
|
def __init__(self, config_file=None, config=None):
|
||||||
|
if config is None and config_file is None:
|
||||||
|
raise ValueError('config or config_file must be defined')
|
||||||
|
elif not config_file is None:
|
||||||
|
config = self._load_file(config_file)
|
||||||
|
|
||||||
|
config = FRIGATE_CONFIG_SCHEMA(config)
|
||||||
|
|
||||||
|
config = self._sub_env_vars(config)
|
||||||
|
|
||||||
|
self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
|
||||||
|
self._mqtt = MqttConfig(config['mqtt'])
|
||||||
|
self._save_clips = SaveClipsConfig(config['save_clips'])
|
||||||
|
self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() }
|
||||||
|
self._logger = LoggerConfig(config['logger'])
|
||||||
|
|
||||||
|
def _sub_env_vars(self, config):
|
||||||
|
frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
|
||||||
|
|
||||||
|
if 'password' in config['mqtt']:
|
||||||
|
config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
|
||||||
|
|
||||||
|
for camera in config['cameras'].values():
|
||||||
|
for i in camera['ffmpeg']['inputs']:
|
||||||
|
i['path'] = i['path'].format(**frigate_env_vars)
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
def _load_file(self, config_file):
|
||||||
|
with open(config_file) as f:
|
||||||
|
raw_config = f.read()
|
||||||
|
|
||||||
|
if config_file.endswith(".yml"):
|
||||||
|
config = yaml.safe_load(raw_config)
|
||||||
|
elif config_file.endswith(".json"):
|
||||||
|
config = json.loads(raw_config)
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
def to_dict(self):
|
||||||
|
return {
|
||||||
|
'detectors': {k: d.to_dict() for k, d in self.detectors.items()},
|
||||||
|
'mqtt': self.mqtt.to_dict(),
|
||||||
|
'save_clips': self.save_clips.to_dict(),
|
||||||
|
'cameras': {k: c.to_dict() for k, c in self.cameras.items()},
|
||||||
|
'logger': self.logger.to_dict()
|
||||||
|
}
|
||||||
|
|
||||||
|
@property
|
||||||
|
def detectors(self) -> Dict[str, DetectorConfig]:
|
||||||
|
return self._detectors
|
||||||
|
|
||||||
|
@property
|
||||||
|
def logger(self):
|
||||||
|
return self._logger
|
||||||
|
|
||||||
|
@property
|
||||||
|
def mqtt(self):
|
||||||
|
return self._mqtt
|
||||||
|
|
||||||
|
@property
|
||||||
|
def save_clips(self):
|
||||||
|
return self._save_clips
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cameras(self) -> Dict[str, CameraConfig]:
|
||||||
|
return self._cameras
|
||||||
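Not part of the file above — a minimal sketch of how FrigateConfig might be exercised from an in-memory dict, assuming the module's dependencies (voluptuous, cv2, numpy, matplotlib) are installed. The broker host, camera name and RTSP path are hypothetical; the {FRIGATE_RTSP_PASSWORD} placeholder is filled in from the environment by _sub_env_vars:

import os
from frigate.config import FrigateConfig

os.environ['FRIGATE_RTSP_PASSWORD'] = 'example'  # hypothetical secret

config = FrigateConfig(config={
    'mqtt': {'host': 'mqtt.local'},  # hypothetical broker
    'cameras': {
        'back_yard': {  # hypothetical camera
            'ffmpeg': {
                'inputs': [
                    {'path': 'rtsp://user:{FRIGATE_RTSP_PASSWORD}@10.0.0.10/live', 'roles': ['detect']}
                ]
            },
            'height': 720,
            'width': 1280,
        }
    }
})

print(config.cameras['back_yard'].frame_shape)  # (720, 1280)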
3
frigate/const.py
Normal file
@@ -0,0 +1,3 @@
CLIPS_DIR = '/media/frigate/clips'
RECORD_DIR = '/media/frigate/recordings'
CACHE_DIR = '/tmp/cache'
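These directories tie the ffmpeg output templates in config.py to the event code that follows: clip segments land in CACHE_DIR and assembled clips in CLIPS_DIR. A small hedged illustration of how a cached segment name is composed (the camera name is hypothetical; the strftime pattern mirrors the clips output template above):

import os
from datetime import datetime

CACHE_DIR = '/tmp/cache'
camera_name = 'back_yard'  # hypothetical camera
segment = f"{os.path.join(CACHE_DIR, camera_name)}-{datetime.now().strftime('%Y%m%d%H%M%S')}.mp4"
print(segment)  # e.g. /tmp/cache/back_yard-20201031120000.mp4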
frigate/edgetpu.py
@@ -1,15 +1,22 @@
-import os
 import datetime
 import hashlib
+import logging
 import multiprocessing as mp
+import os
 import queue
-from multiprocessing.connection import Connection
+import threading
+import signal
 from abc import ABC, abstractmethod
+from multiprocessing.connection import Connection
 from typing import Dict
 
 import numpy as np
 import tflite_runtime.interpreter as tflite
 from tflite_runtime.interpreter import load_delegate
-from frigate.util import EventsPerSecond, listen, SharedMemoryFrameManager
+
+from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen
+
+logger = logging.getLogger(__name__)
 
 def load_labels(path, encoding='utf-8'):
   """Loads labels from file (with or without index numbers).
@@ -51,11 +58,11 @@ class LocalObjectDetector(ObjectDetector):
 
         if tf_device != 'cpu':
             try:
-                print(f"Attempting to load TPU as {device_config['device']}")
+                logger.info(f"Attempting to load TPU as {device_config['device']}")
                 edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
-                print("TPU found")
+                logger.info("TPU found")
             except ValueError:
-                print("No EdgeTPU detected. Falling back to CPU.")
+                logger.info("No EdgeTPU detected. Falling back to CPU.")
 
         if edge_tpu_delegate is None:
             self.interpreter = tflite.Interpreter(
@@ -99,9 +106,19 @@ class LocalObjectDetector(ObjectDetector):
 
         return detections
 
-def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, start, tf_device):
-    print(f"Starting detection process: {os.getpid()}")
+def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, tf_device):
+    threading.current_thread().name = f"detector:{name}"
+    logger = logging.getLogger(f"detector.{name}")
+    logger.info(f"Starting detection process: {os.getpid()}")
     listen()
+
+    stop_event = mp.Event()
+    def receiveSignal(signalNumber, frame):
+        stop_event.set()
+
+    signal.signal(signal.SIGTERM, receiveSignal)
+    signal.signal(signal.SIGINT, receiveSignal)
+
     frame_manager = SharedMemoryFrameManager()
     object_detector = LocalObjectDetector(tf_device=tf_device)
 
@@ -115,7 +132,13 @@ def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, st
     }
 
     while True:
-        connection_id = detection_queue.get()
+        if stop_event.is_set():
+            break
+
+        try:
+            connection_id = detection_queue.get(timeout=5)
+        except queue.Empty:
+            continue
         input_frame = frame_manager.get(connection_id, (1,300,300,3))
 
         if input_frame is None:
@@ -132,7 +155,8 @@ def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, st
         avg_speed.value = (avg_speed.value*9 + duration)/10
 
 class EdgeTPUProcess():
-    def __init__(self, detection_queue, out_events, tf_device=None):
+    def __init__(self, name, detection_queue, out_events, tf_device=None):
+        self.name = name
         self.out_events = out_events
         self.detection_queue = detection_queue
         self.avg_inference_speed = mp.Value('d', 0.01)
@@ -143,10 +167,10 @@ class EdgeTPUProcess():
 
     def stop(self):
         self.detect_process.terminate()
-        print("Waiting for detection process to exit gracefully...")
+        logging.info("Waiting for detection process to exit gracefully...")
         self.detect_process.join(timeout=30)
         if self.detect_process.exitcode is None:
-            print("Detection process didnt exit. Force killing...")
+            logging.info("Detection process didnt exit. Force killing...")
             self.detect_process.kill()
         self.detect_process.join()
 
@@ -154,7 +178,7 @@ class EdgeTPUProcess():
         self.detection_start.value = 0.0
         if (not self.detect_process is None) and self.detect_process.is_alive():
             self.stop()
-        self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.tf_device))
+        self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.tf_device))
        self.detect_process.daemon = True
        self.detect_process.start()
 
@@ -177,7 +201,11 @@ class RemoteObjectDetector():
         self.np_shm[:] = tensor_input[:]
         self.event.clear()
         self.detection_queue.put(self.name)
-        self.event.wait()
+        result = self.event.wait(timeout=10.0)
+
+        # if it timed out
+        if result is None:
+            return detections
 
         for d in self.out_np_shm:
             if d[1] < threshold:
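The main behavioral change above is that the detector loop no longer blocks forever on detection_queue.get(): it wakes up every few seconds to re-check a stop event that a SIGTERM/SIGINT handler sets. A stand-alone sketch of that consume-with-timeout pattern, not taken from the repo (the queue name and timeout are illustrative):

import multiprocessing as mp
import queue
import signal

def worker(job_queue: mp.Queue):
    stop_event = mp.Event()
    signal.signal(signal.SIGTERM, lambda signum, frame: stop_event.set())

    while not stop_event.is_set():
        try:
            job = job_queue.get(timeout=5)  # wake periodically to re-check the stop flag
        except queue.Empty:
            continue
        # ... run inference on the job here ...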
frigate/events.py
@@ -1,36 +1,48 @@
-import os
-import time
-import psutil
-import threading
-from collections import defaultdict
-import json
 import datetime
-import subprocess as sp
+import json
+import logging
+import os
 import queue
+import subprocess as sp
+import threading
+import time
+from collections import defaultdict
+from pathlib import Path
+
+import psutil
+
+from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.models import Event
+
+from peewee import fn
+
+logger = logging.getLogger(__name__)
+
 class EventProcessor(threading.Thread):
-    def __init__(self, config, camera_processes, cache_dir, clip_dir, event_queue, stop_event):
+    def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event):
         threading.Thread.__init__(self)
+        self.name = 'event_processor'
         self.config = config
         self.camera_processes = camera_processes
-        self.cache_dir = cache_dir
-        self.clip_dir = clip_dir
         self.cached_clips = {}
         self.event_queue = event_queue
+        self.event_processed_queue = event_processed_queue
         self.events_in_process = {}
         self.stop_event = stop_event
 
     def refresh_cache(self):
-        cached_files = os.listdir(self.cache_dir)
+        cached_files = os.listdir(CACHE_DIR)
 
         files_in_use = []
-        for process_data in self.camera_processes.values():
+        for process in psutil.process_iter():
+            if process.name() != 'ffmpeg':
+                continue
             try:
-                ffmpeg_process = psutil.Process(pid=process_data['ffmpeg_process'].pid)
-                flist = ffmpeg_process.open_files()
+                flist = process.open_files()
                 if flist:
                     for nt in flist:
-                        if nt.path.startswith(self.cache_dir):
+                        if nt.path.startswith(CACHE_DIR):
                             files_in_use.append(nt.path.split('/')[-1])
             except:
                 continue
@@ -50,7 +62,7 @@ class EventProcessor(threading.Thread):
                 'format=duration',
                 '-of',
                 'default=noprint_wrappers=1:nokey=1',
-                f"{os.path.join(self.cache_dir,f)}"
+                f"{os.path.join(CACHE_DIR,f)}"
            ])
            p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
            (output, err) = p.communicate()
@@ -58,8 +70,8 @@ class EventProcessor(threading.Thread):
            if p_status == 0:
                duration = float(output.decode('utf-8').strip())
            else:
-                print(f"bad file: {f}")
-                os.remove(os.path.join(self.cache_dir,f))
+                logger.info(f"bad file: {f}")
+                os.remove(os.path.join(CACHE_DIR,f))
                continue
 
            self.cached_clips[f] = {
@@ -75,14 +87,14 @@ class EventProcessor(threading.Thread):
            earliest_event = datetime.datetime.now().timestamp()
 
        # if the earliest event exceeds the max seconds, cap it
-        max_seconds = self.config.get('save_clips', {}).get('max_seconds', 300)
+        max_seconds = self.config.save_clips.max_seconds
        if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
            earliest_event = datetime.datetime.now().timestamp()-max_seconds
 
        for f, data in list(self.cached_clips.items()):
            if earliest_event-90 > data['start_time']+data['duration']:
                del self.cached_clips[f]
-                os.remove(os.path.join(self.cache_dir,f))
+                os.remove(os.path.join(CACHE_DIR,f))
 
    def create_clip(self, camera, event_data, pre_capture):
        # get all clips from the camera with the event sorted
@@ -104,7 +116,7 @@ class EventProcessor(threading.Thread):
            # clip starts after playlist ends, finish
            if clip['start_time'] > playlist_end:
                break
-            playlist_lines.append(f"file '{os.path.join(self.cache_dir,clip['path'])}'")
+            playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
            # if this is the starting clip, add an inpoint
            if clip['start_time'] < playlist_start:
                playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
@@ -126,21 +138,18 @@ class EventProcessor(threading.Thread):
            '-',
            '-c',
            'copy',
-            f"{os.path.join(self.clip_dir, clip_name)}.mp4"
+            f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
        ]
 
        p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
        if p.returncode != 0:
-            print(p.stderr)
+            logger.error(p.stderr)
            return
 
-        with open(f"{os.path.join(self.clip_dir, clip_name)}.json", 'w') as outfile:
-            json.dump(event_data, outfile)
 
    def run(self):
        while True:
            if self.stop_event.is_set():
-                print(f"Exiting event processor...")
+                logger.info(f"Exiting event processor...")
                break
 
            try:
@@ -152,23 +161,126 @@ class EventProcessor(threading.Thread):
 
            self.refresh_cache()
 
-            save_clips_config = self.config['cameras'][camera].get('save_clips', {})
+            save_clips_config = self.config.cameras[camera].save_clips
 
            # if save clips is not enabled for this camera, just continue
-            if not save_clips_config.get('enabled', False):
+            if not save_clips_config.enabled:
+                if event_type == 'end':
+                    self.event_processed_queue.put((event_data['id'], camera))
                continue
 
            # if specific objects are listed for this camera, only save clips for them
-            if 'objects' in save_clips_config:
-                if not event_data['label'] in save_clips_config['objects']:
-                    continue
+            if not event_data['label'] in save_clips_config.objects:
+                if event_type == 'end':
+                    self.event_processed_queue.put((event_data['id'], camera))
+                continue
 
            if event_type == 'start':
                self.events_in_process[event_data['id']] = event_data
 
            if event_type == 'end':
                if len(self.cached_clips) > 0 and not event_data['false_positive']:
-                    self.create_clip(camera, event_data, save_clips_config.get('pre_capture', 30))
+                    self.create_clip(camera, event_data, save_clips_config.pre_capture)
+                Event.create(
+                    id=event_data['id'],
+                    label=event_data['label'],
+                    camera=camera,
+                    start_time=event_data['start_time'],
+                    end_time=event_data['end_time'],
+                    top_score=event_data['top_score'],
+                    false_positive=event_data['false_positive'],
+                    zones=list(event_data['entered_zones']),
+                    thumbnail=event_data['thumbnail']
+                )
                del self.events_in_process[event_data['id']]
+                self.event_processed_queue.put((event_data['id'], camera))
+
+class EventCleanup(threading.Thread):
+    def __init__(self, config: FrigateConfig, stop_event):
+        threading.Thread.__init__(self)
+        self.name = 'event_cleanup'
+        self.config = config
+        self.stop_event = stop_event
+
+    def run(self):
+        counter = 0
+        while(True):
+            if self.stop_event.is_set():
+                logger.info(f"Exiting event cleanup...")
+                break
+
+            # only expire events every 10 minutes, but check for stop events every 10 seconds
+            time.sleep(10)
+            counter = counter + 1
+            if counter < 60:
+                continue
+            counter = 0
+
+            camera_keys = list(self.config.cameras.keys())
+
+            # Expire events from unlisted cameras based on the global config
+            retain_config = self.config.save_clips.retain
+
+            distinct_labels = (Event.select(Event.label)
+                        .where(Event.camera.not_in(camera_keys))
+                        .distinct())
+
+            # loop over object types in db
+            for l in distinct_labels:
+                # get expiration time for this label
+                expire_days = retain_config.objects.get(l.label, retain_config.default)
+                expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+                # grab all events after specific time
+                expired_events = (
+                    Event.select()
+                        .where(Event.camera.not_in(camera_keys),
+                            Event.start_time < expire_after,
+                            Event.label == l.label)
+                )
+                # delete the grabbed clips from disk
+                for event in expired_events:
+                    clip_name = f"{event.camera}-{event.id}"
+                    clip = Path(f"{os.path.join(CLIPS_DIR, clip_name)}.mp4")
+                    clip.unlink(missing_ok=True)
+                # delete the event for this type from the db
+                delete_query = (
+                    Event.delete()
+                        .where(Event.camera.not_in(camera_keys),
+                            Event.start_time < expire_after,
+                            Event.label == l.label)
+                )
+                delete_query.execute()
+
+            # Expire events from cameras based on the camera config
+            for name, camera in self.config.cameras.items():
+                retain_config = camera.save_clips.retain
+                # get distinct objects in database for this camera
+                distinct_labels = (Event.select(Event.label)
+                        .where(Event.camera == name)
+                        .distinct())
+
+                # loop over object types in db
+                for l in distinct_labels:
+                    # get expiration time for this label
+                    expire_days = retain_config.objects.get(l.label, retain_config.default)
+                    expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+                    # grab all events after specific time
+                    expired_events = (
+                        Event.select()
+                            .where(Event.camera == name,
+                                Event.start_time < expire_after,
+                                Event.label == l.label)
+                    )
+                    # delete the grabbed clips from disk
+                    for event in expired_events:
+                        clip_name = f"{event.camera}-{event.id}"
+                        clip = Path(f"{os.path.join(CLIPS_DIR, clip_name)}.mp4")
+                        clip.unlink(missing_ok=True)
+                    # delete the event for this type from the db
+                    delete_query = (
+                        Event.delete()
+                            .where( Event.camera == name,
+                                Event.start_time < expire_after,
+                                Event.label == l.label)
+                    )
+                    delete_query.execute()
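EventCleanup resolves a per-label retention period from save_clips.retain, falling back to the default, and deletes anything older. A small hedged sketch of just that lookup, with hypothetical numbers standing in for the real config values:

import datetime

retain = {'default': 10, 'objects': {'person': 30}}  # hypothetical retention settings

def expire_after(label):
    days = retain['objects'].get(label, retain['default'])
    return (datetime.datetime.now() - datetime.timedelta(days=days)).timestamp()

cutoff_person = expire_after('person')  # person events kept for 30 days
cutoff_car = expire_after('car')        # everything else falls back to the 10 day default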
246
frigate/http.py
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
import base64
|
||||||
|
import datetime
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from functools import reduce
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
from flask import (Blueprint, Flask, Response, current_app, jsonify,
|
||||||
|
make_response, request)
|
||||||
|
from peewee import SqliteDatabase, operator, fn, DoesNotExist
|
||||||
|
from playhouse.shortcuts import model_to_dict
|
||||||
|
|
||||||
|
from frigate.models import Event
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
bp = Blueprint('frigate', __name__)
|
||||||
|
|
||||||
|
def create_app(frigate_config, database: SqliteDatabase, camera_metrics, detectors, detected_frames_processor):
|
||||||
|
app = Flask(__name__)
|
||||||
|
|
||||||
|
@app.before_request
|
||||||
|
def _db_connect():
|
||||||
|
database.connect()
|
||||||
|
|
||||||
|
@app.teardown_request
|
||||||
|
def _db_close(exc):
|
||||||
|
if not database.is_closed():
|
||||||
|
database.close()
|
||||||
|
|
||||||
|
app.frigate_config = frigate_config
|
||||||
|
app.camera_metrics = camera_metrics
|
||||||
|
app.detectors = detectors
|
||||||
|
app.detected_frames_processor = detected_frames_processor
|
||||||
|
|
||||||
|
app.register_blueprint(bp)
|
||||||
|
|
||||||
|
return app
|
||||||
|
|
||||||
|
@bp.route('/')
|
||||||
|
def is_healthy():
|
||||||
|
return "Frigate is running. Alive and healthy!"
|
||||||
|
|
||||||
|
@bp.route('/events/summary')
|
||||||
|
def events_summary():
|
||||||
|
groups = (
|
||||||
|
Event
|
||||||
|
.select(
|
||||||
|
Event.camera,
|
||||||
|
Event.label,
|
||||||
|
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
|
||||||
|
Event.zones,
|
||||||
|
fn.COUNT(Event.id).alias('count')
|
||||||
|
)
|
||||||
|
.group_by(
|
||||||
|
Event.camera,
|
||||||
|
Event.label,
|
||||||
|
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
|
||||||
|
Event.zones
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return jsonify([e for e in groups.dicts()])
|
||||||
|
|
||||||
|
@bp.route('/events/<id>')
|
||||||
|
def event(id):
|
||||||
|
try:
|
||||||
|
return model_to_dict(Event.get(Event.id == id))
|
||||||
|
except DoesNotExist:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
@bp.route('/events/<id>/snapshot.jpg')
|
||||||
|
def event_snapshot(id):
|
||||||
|
format = request.args.get('format', 'ios')
|
||||||
|
thumbnail_bytes = None
|
||||||
|
try:
|
||||||
|
event = Event.get(Event.id == id)
|
||||||
|
thumbnail_bytes = base64.b64decode(event.thumbnail)
|
||||||
|
except DoesNotExist:
|
||||||
|
# see if the object is currently being tracked
|
||||||
|
try:
|
||||||
|
for camera_state in current_app.detected_frames_processor.camera_states.values():
|
||||||
|
if id in camera_state.tracked_objects:
|
||||||
|
tracked_obj = camera_state.tracked_objects.get(id)
|
||||||
|
if not tracked_obj is None:
|
||||||
|
thumbnail_bytes = tracked_obj.get_jpg_bytes()
|
||||||
|
except:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
if thumbnail_bytes is None:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
# android notifications prefer a 2:1 ratio
|
||||||
|
if format == 'android':
|
||||||
|
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
|
||||||
|
img = cv2.imdecode(jpg_as_np, flags=1)
|
||||||
|
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
|
||||||
|
ret, jpg = cv2.imencode('.jpg', thumbnail)
|
||||||
|
thumbnail_bytes = jpg.tobytes()
|
||||||
|
|
||||||
|
response = make_response(thumbnail_bytes)
|
||||||
|
response.headers['Content-Type'] = 'image/jpg'
|
||||||
|
return response
|
||||||
|
|
||||||
|
@bp.route('/events')
|
||||||
|
def events():
|
||||||
|
limit = request.args.get('limit', 100)
|
||||||
|
camera = request.args.get('camera')
|
||||||
|
label = request.args.get('label')
|
||||||
|
zone = request.args.get('zone')
|
||||||
|
after = request.args.get('after', type=int)
|
||||||
|
before = request.args.get('before', type=int)
|
||||||
|
|
||||||
|
clauses = []
|
||||||
|
|
||||||
|
if camera:
|
||||||
|
clauses.append((Event.camera == camera))
|
||||||
|
|
||||||
|
if label:
|
||||||
|
clauses.append((Event.label == label))
|
||||||
|
|
||||||
|
if zone:
|
||||||
|
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
|
||||||
|
|
||||||
|
if after:
|
||||||
|
clauses.append((Event.start_time >= after))
|
||||||
|
|
||||||
|
if before:
|
||||||
|
clauses.append((Event.start_time <= before))
|
||||||
|
|
||||||
|
if len(clauses) == 0:
|
||||||
|
clauses.append((1 == 1))
|
||||||
|
|
||||||
|
events = (Event.select()
|
||||||
|
.where(reduce(operator.and_, clauses))
|
||||||
|
.order_by(Event.start_time.desc())
|
||||||
|
.limit(limit))
|
||||||
|
|
||||||
|
return jsonify([model_to_dict(e) for e in events])
|
||||||
|
|
||||||
|
@bp.route('/config')
|
||||||
|
def config():
|
||||||
|
return jsonify(current_app.frigate_config.to_dict())
|
||||||
|
|
||||||
@bp.route('/stats')
def stats():
    camera_metrics = current_app.camera_metrics
    stats = {}

    total_detection_fps = 0

    for name, camera_stats in camera_metrics.items():
        total_detection_fps += camera_stats['detection_fps'].value
        stats[name] = {
            'camera_fps': round(camera_stats['camera_fps'].value, 2),
            'process_fps': round(camera_stats['process_fps'].value, 2),
            'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
            'detection_fps': round(camera_stats['detection_fps'].value, 2),
            'pid': camera_stats['process'].pid,
            'capture_pid': camera_stats['capture_process'].pid
        }

    stats['detectors'] = {}
    for name, detector in current_app.detectors.items():
        stats['detectors'][name] = {
            'inference_speed': round(detector.avg_inference_speed.value*1000, 2),
            'detection_start': detector.detection_start.value,
            'pid': detector.detect_process.pid
        }
    stats['detection_fps'] = round(total_detection_fps, 2)

    return jsonify(stats)

@bp.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
    if camera_name in current_app.frigate_config.cameras:
        best_object = current_app.detected_frames_processor.get_best(camera_name, label)
        best_frame = best_object.get('frame')
        if best_frame is None:
            best_frame = np.zeros((720,1280,3), np.uint8)
        else:
            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

        crop = bool(request.args.get('crop', 0, type=int))
        if crop:
            region = best_object.get('region', [0,0,300,300])
            best_frame = best_frame[region[1]:region[3], region[0]:region[2]]

        height = int(request.args.get('h', str(best_frame.shape[0])))
        width = int(height*best_frame.shape[1]/best_frame.shape[0])

        best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
        ret, jpg = cv2.imencode('.jpg', best_frame)
        response = make_response(jpg.tobytes())
        response.headers['Content-Type'] = 'image/jpg'
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404

@bp.route('/<camera_name>')
def mjpeg_feed(camera_name):
    fps = int(request.args.get('fps', '3'))
    height = int(request.args.get('h', '360'))
    if camera_name in current_app.frigate_config.cameras:
        # return a multipart response
        return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    else:
        return "Camera named {} not found".format(camera_name), 404

@bp.route('/<camera_name>/latest.jpg')
def latest_frame(camera_name):
    if camera_name in current_app.frigate_config.cameras:
        # max out at specified FPS
        frame = current_app.detected_frames_processor.get_current_frame(camera_name)
        if frame is None:
            frame = np.zeros((720,1280,3), np.uint8)

        height = int(request.args.get('h', str(frame.shape[0])))
        width = int(height*frame.shape[1]/frame.shape[0])

        frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

        ret, jpg = cv2.imencode('.jpg', frame)
        response = make_response(jpg.tobytes())
        response.headers['Content-Type'] = 'image/jpg'
        return response
    else:
        return "Camera named {} not found".format(camera_name), 404

def imagestream(detected_frames_processor, camera_name, fps, height):
    while True:
        # max out at specified FPS
        time.sleep(1/fps)
        frame = detected_frames_processor.get_current_frame(camera_name, draw=True)
        if frame is None:
            frame = np.zeros((height,int(height*16/9),3), np.uint8)

        width = int(height*frame.shape[1]/frame.shape[0])
        frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)

        ret, jpg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
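The handlers above are plain Flask routes, so the new stats payload can be inspected with any HTTP client. A minimal sketch of reading it (the localhost:5000 address and the requests dependency are assumptions for illustration, not part of this diff):

import requests

# fetch the aggregate stats exposed by the /stats route above
stats = requests.get('http://localhost:5000/stats').json()

# detector metrics live under 'detectors'; everything else except
# 'detection_fps' is a per-camera entry keyed by camera name
for name, detector in stats['detectors'].items():
    print(name, detector['inference_speed'], 'ms')
for name, camera in stats.items():
    if name in ('detectors', 'detection_fps'):
        continue
    print(name, camera['camera_fps'], camera['detection_fps'])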
75
frigate/log.py
Normal file
@@ -0,0 +1,75 @@
# adapted from https://medium.com/@jonathonbao/python3-logging-with-multiprocessing-f51f460b8778
import logging
import threading
import os
import signal
import queue
import multiprocessing as mp
from logging import handlers


def listener_configurer():
    root = logging.getLogger()
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)-30s %(levelname)-8s: %(message)s')
    console_handler.setFormatter(formatter)
    root.addHandler(console_handler)
    root.setLevel(logging.INFO)

def root_configurer(queue):
    h = handlers.QueueHandler(queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(logging.INFO)

def log_process(log_queue):
    stop_event = mp.Event()
    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"logger"
    listener_configurer()
    while True:
        if stop_event.is_set() and log_queue.empty():
            break
        try:
            record = log_queue.get(timeout=5)
        except queue.Empty:
            continue
        logger = logging.getLogger(record.name)
        logger.handle(record)

# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
    def __init__(self, log_name, level):
        """Setup the object with a logger and a loglevel
        and start the thread
        """
        threading.Thread.__init__(self)
        self.daemon = False
        self.logger = logging.getLogger(log_name)
        self.level = level
        self.fdRead, self.fdWrite = os.pipe()
        self.pipeReader = os.fdopen(self.fdRead)
        self.start()

    def fileno(self):
        """Return the write file descriptor of the pipe
        """
        return self.fdWrite

    def run(self):
        """Run the thread, logging everything.
        """
        for line in iter(self.pipeReader.readline, ''):
            self.logger.log(self.level, line.strip('\n'))

        self.pipeReader.close()

    def close(self):
        """Close the write end of the pipe.
        """
        os.close(self.fdWrite)
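frigate/log.py follows the standard multiprocessing logging recipe: one listener process drains a shared queue in log_process, and every other process forwards its records into that queue through root_configurer. A rough sketch of how the pieces might be wired together (the worker function and its message are illustrative only, not part of this diff):

import logging
import multiprocessing as mp

from frigate.log import log_process, root_configurer

def worker(log_queue):
    # each child process attaches a QueueHandler and logs normally
    root_configurer(log_queue)
    logging.getLogger('frigate.example').info('hello from a worker')

if __name__ == '__main__':
    log_queue = mp.Queue()
    listener = mp.Process(target=log_process, args=(log_queue,), name='log_process')
    listener.start()
    w = mp.Process(target=worker, args=(log_queue,))
    w.start()
    w.join()
    # log_process exits once it receives SIGTERM and the queue is drained
    listener.terminate()
    listener.join()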
14
frigate/models.py
Normal file
@@ -0,0 +1,14 @@
from peewee import *
from playhouse.sqlite_ext import *


class Event(Model):
    id = CharField(null=False, primary_key=True, max_length=30)
    label = CharField(index=True, max__length=20) if False else CharField(index=True, max_length=20)
    camera = CharField(index=True, max_length=20)
    start_time = DateTimeField()
    end_time = DateTimeField()
    top_score = FloatField()
    false_positive = BooleanField()
    zones = JSONField()
    thumbnail = TextField()
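The new Event model is a plain peewee model, so it stays unbound until a database is attached. A sketch of binding it to a SQLite file and querying recent events (the database path is an assumption for illustration):

from peewee import SqliteDatabase
from frigate.models import Event

# bind the model to a database before using it (path is illustrative)
db = SqliteDatabase('/media/frigate/frigate.db')
db.bind([Event])
db.create_tables([Event], safe=True)

# the ten most recent person events for one camera
recent = (Event.select()
          .where((Event.camera == 'back') & (Event.label == 'person'))
          .order_by(Event.start_time.desc())
          .limit(10))
for event in recent:
    print(event.id, event.top_score, event.zones)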
@@ -2,6 +2,7 @@ import cv2
import imutils
import numpy as np


class MotionDetector():
    def __init__(self, frame_shape, mask, resize_factor=4):
        self.frame_shape = frame_shape
@@ -48,7 +49,7 @@ class MotionDetector():

        # black out everything in the avg_delta where there isnt motion in the current frame
        avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
-        avg_delta_image[np.where(current_thresh==[0])] = [0]
+        avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)

        # then look for deltas above the threshold, but only in areas where there is a delta
        # in the current frame. this prevents deltas from previous frames from being included
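The only functional change in this hunk replaces index-based masking with cv2.bitwise_and, which zeroes the averaged delta wherever the current threshold mask is zero. A small self-contained illustration of the same operation (the array values are made up):

import cv2
import numpy as np

# a toy 8-bit "average delta" image and a binary threshold mask (0 or 255)
avg_delta_image = np.array([[10, 200], [50, 90]], dtype=np.uint8)
current_thresh = np.array([[0, 255], [255, 0]], dtype=np.uint8)

# keep delta values only where the current frame shows motion
masked = cv2.bitwise_and(avg_delta_image, current_thresh)
print(masked)  # [[  0 200]
               #  [ 50   0]]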
36
frigate/mqtt.py
Normal file
@@ -0,0 +1,36 @@
import logging
import threading

import paho.mqtt.client as mqtt

from frigate.config import MqttConfig

logger = logging.getLogger(__name__)

def create_mqtt_client(config: MqttConfig):
    client = mqtt.Client(client_id=config.client_id)
    def on_connect(client, userdata, flags, rc):
        threading.current_thread().name = "mqtt"
        if rc != 0:
            if rc == 3:
                logger.error("MQTT Server unavailable")
            elif rc == 4:
                logger.error("MQTT Bad username or password")
            elif rc == 5:
                logger.error("MQTT Not authorized")
            else:
                logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))

        logger.info("MQTT connected")
        client.publish(config.topic_prefix+'/available', 'online', retain=True)
    client.on_connect = on_connect
    client.will_set(config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
    if not config.user is None:
        client.username_pw_set(config.user, password=config.password)
    try:
        client.connect(config.host, config.port, 60)
    except Exception as e:
        logger.error(f"Unable to connect to MQTT server: {e}")
        raise
    client.loop_start()
    return client
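The client created here publishes an availability topic, and (per the object_processing changes later in this diff) event updates go to a single <topic_prefix>/events topic with before/after payloads. A minimal listener sketch using paho directly (the broker address and the frigate topic prefix are assumptions):

import json
import paho.mqtt.client as mqtt

TOPIC_PREFIX = 'frigate'  # assumption: default topic prefix

def on_message(client, userdata, msg):
    if msg.topic == f'{TOPIC_PREFIX}/events':
        payload = json.loads(msg.payload)
        print(payload['after']['camera'], payload['after']['label'])
    else:
        print(msg.topic, msg.payload)

listener = mqtt.Client()
listener.on_message = on_message
listener.connect('mqtt.local', 1883, 60)  # assumption: broker address
listener.subscribe([(f'{TOPIC_PREFIX}/available', 0), (f'{TOPIC_PREFIX}/events', 0)])
listener.loop_forever()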
@@ -1,20 +1,28 @@
-import json
+import copy
-import hashlib
+import base64
import datetime
-import time
+import hashlib
-import copy
-import cv2
-import threading
-import queue
-import copy
-import numpy as np
-from collections import Counter, defaultdict
import itertools
-import matplotlib.pyplot as plt
+import json
-from frigate.util import draw_box_with_label, SharedMemoryFrameManager
+import logging
-from frigate.edgetpu import load_labels
+import os
-from typing import Callable, Dict
+import queue
+import threading
+import time
+from collections import Counter, defaultdict
from statistics import mean, median
+from typing import Callable, Dict

+import cv2
+import matplotlib.pyplot as plt
+import numpy as np

+from frigate.config import FrigateConfig, CameraConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.edgetpu import load_labels
+from frigate.util import SharedMemoryFrameManager, draw_box_with_label

+logger = logging.getLogger(__name__)

PATH_TO_LABELS = '/labelmap.txt'
@@ -25,24 +33,201 @@ COLOR_MAP = {}
for key, val in LABELS.items():
    COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])

-def zone_filtered(obj, object_config):
+def on_edge(box, frame_shape):
-    object_name = obj['label']
+    if (
+        box[0] == 0 or
+        box[1] == 0 or
+        box[2] == frame_shape[1]-1 or
+        box[3] == frame_shape[0]-1
+    ):
+        return True
+
+def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
+    # larger is better
+    # cutoff images are less ideal, but they should also be smaller?
+    # better scores are obviously better too
+
+    # if the new_thumb is on an edge, and the current thumb is not
+    if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
+        return False
+
+    # if the score is better by more than 5%
+    if new_obj['score'] > current_thumb['score']+.05:
+        return True
+
+    # if the area is 10% larger
+    if new_obj['area'] > current_thumb['area']*1.1:
+        return True
+
+    return False
+
+class TrackedObject():
+    def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
+        self.obj_data = obj_data
+        self.camera = camera
+        self.camera_config = camera_config
+        self.frame_cache = frame_cache
+        self.current_zones = []
+        self.entered_zones = set()
+        self.false_positive = True
+        self.top_score = self.computed_score = 0.0
+        self.thumbnail_data = None
+        self.frame = None
+        self.previous = None
+        self._snapshot_jpg_time = 0
+        ret, jpg = cv2.imencode('.jpg', np.zeros((300,300,3), np.uint8))
+        self._snapshot_jpg = jpg.tobytes()
+
+        # start the score history
+        self.score_history = [self.obj_data['score']]
+
+    def _is_false_positive(self):
+        # once a true positive, always a true positive
+        if not self.false_positive:
+            return False
+
+        threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
+        if self.computed_score < threshold:
+            return True
+        return False
+
+    def compute_score(self):
+        scores = self.score_history[:]
+        # pad with zeros if you dont have at least 3 scores
+        if len(scores) < 3:
+            scores += [0.0]*(3 - len(scores))
+        return median(scores)
+
+    def update(self, current_frame_time, obj_data):
+        previous = self.to_dict()
+        self.obj_data.update(obj_data)
+        # if the object is not in the current frame, add a 0.0 to the score history
+        if self.obj_data['frame_time'] != current_frame_time:
+            self.score_history.append(0.0)
+        else:
+            self.score_history.append(self.obj_data['score'])
+        # only keep the last 10 scores
+        if len(self.score_history) > 10:
+            self.score_history = self.score_history[-10:]
+
+        # calculate if this is a false positive
+        self.computed_score = self.compute_score()
+        if self.computed_score > self.top_score:
+            self.top_score = self.computed_score
+        self.false_positive = self._is_false_positive()
+
+        if not self.false_positive:
+            # determine if this frame is a better thumbnail
+            if (
+                self.thumbnail_data is None
+                or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
+            ):
+                self.thumbnail_data = {
+                    'frame_time': self.obj_data['frame_time'],
+                    'box': self.obj_data['box'],
+                    'area': self.obj_data['area'],
+                    'region': self.obj_data['region'],
+                    'score': self.obj_data['score']
+                }
+                self.previous = previous
+
+        # check zones
+        current_zones = []
+        bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
+        # check each zone
+        for name, zone in self.camera_config.zones.items():
+            contour = zone.contour
+            # check if the object is in the zone
+            if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
+                # if the object passed the filters once, dont apply again
+                if name in self.current_zones or not zone_filtered(self, zone.filters):
+                    current_zones.append(name)
+                    self.entered_zones.add(name)
+
+        self.current_zones = current_zones
+
+    def to_dict(self, include_thumbnail: bool = False):
+        return {
+            'id': self.obj_data['id'],
+            'camera': self.camera,
+            'frame_time': self.obj_data['frame_time'],
+            'label': self.obj_data['label'],
+            'top_score': self.top_score,
+            'false_positive': self.false_positive,
+            'start_time': self.obj_data['start_time'],
+            'end_time': self.obj_data.get('end_time', None),
+            'score': self.obj_data['score'],
+            'box': self.obj_data['box'],
+            'area': self.obj_data['area'],
+            'region': self.obj_data['region'],
+            'current_zones': self.current_zones.copy(),
+            'entered_zones': list(self.entered_zones).copy(),
+            'thumbnail': base64.b64encode(self.get_jpg_bytes()).decode('utf-8') if include_thumbnail else None
+        }
+
+    def get_jpg_bytes(self):
+        if self.thumbnail_data is None or self._snapshot_jpg_time == self.thumbnail_data['frame_time']:
+            return self._snapshot_jpg
+
+        if not self.thumbnail_data['frame_time'] in self.frame_cache:
+            logger.error(f"Unable to create thumbnail for {self.obj_data['id']}")
+            logger.error(f"Looking for frame_time of {self.thumbnail_data['frame_time']}")
+            logger.error(f"Thumbnail frames: {','.join([str(k) for k in self.frame_cache.keys()])}")
+            return self._snapshot_jpg
+
+        # TODO: crop first to avoid converting the entire frame?
+        snapshot_config = self.camera_config.snapshots
+        best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
+
+        if snapshot_config.draw_bounding_boxes:
+            thickness = 2
+            color = COLOR_MAP[self.obj_data['label']]
+            box = self.thumbnail_data['box']
+            draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'],
+                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
+
+        if snapshot_config.crop_to_region:
+            region = self.thumbnail_data['region']
+            best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
+
+        if snapshot_config.height:
+            height = snapshot_config.height
+            width = int(height*best_frame.shape[1]/best_frame.shape[0])
+            best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+        if snapshot_config.show_timestamp:
+            time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
+            size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
+            text_width = size[0][0]
+            desired_size = max(150, 0.33*best_frame.shape[1])
+            font_scale = desired_size/text_width
+            cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
+                fontScale=font_scale, color=(255, 255, 255), thickness=2)
+
+        ret, jpg = cv2.imencode('.jpg', best_frame)
+        if ret:
+            self._snapshot_jpg = jpg.tobytes()
+
+        return self._snapshot_jpg
+
+def zone_filtered(obj: TrackedObject, object_config):
+    object_name = obj.obj_data['label']
+
    if object_name in object_config:
        obj_settings = object_config[object_name]

        # if the min area is larger than the
        # detected object, don't add it to detected objects
-        if obj_settings.get('min_area',-1) > obj['area']:
+        if obj_settings.min_area > obj.obj_data['area']:
            return True

        # if the detected object is larger than the
        # max area, don't add it to detected objects
-        if obj_settings.get('max_area', 24000000) < obj['area']:
+        if obj_settings.max_area < obj.obj_data['area']:
            return True

        # if the score is lower than the threshold, skip
-        if obj_settings.get('threshold', 0) > obj['computed_score']:
+        if obj_settings.threshold > obj.computed_score:
            return True

    return False
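TrackedObject.compute_score pads the rolling score history with zeros until it holds three entries, so a single high-confidence detection does not immediately clear the false-positive threshold. A quick illustration of that behavior using only the standard library (the scores are made up):

from statistics import median

def compute_score(score_history):
    # same padding rule as TrackedObject.compute_score above
    scores = score_history[:]
    if len(scores) < 3:
        scores += [0.0] * (3 - len(scores))
    return median(scores)

print(compute_score([0.92]))              # 0.0  -> below any typical threshold
print(compute_score([0.92, 0.88]))        # 0.88
print(compute_score([0.92, 0.88, 0.90]))  # 0.9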
@@ -52,13 +237,14 @@ class CameraState():
    def __init__(self, name, config, frame_manager):
        self.name = name
        self.config = config
+        self.camera_config = config.cameras[name]
        self.frame_manager = frame_manager
+        self.best_objects: Dict[str, TrackedObject] = {}
-        self.best_objects = {}
+        self.object_counts = defaultdict(lambda: 0)
-        self.object_status = defaultdict(lambda: 'OFF')
+        self.tracked_objects: Dict[str, TrackedObject] = {}
-        self.tracked_objects = {}
+        self.frame_cache = {}
        self.zone_objects = defaultdict(lambda: [])
-        self._current_frame = np.zeros(self.config['frame_shape'], np.uint8)
+        self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
        self.current_frame_lock = threading.Lock()
        self.current_frame_time = 0.0
        self.previous_frame_id = None
@@ -68,7 +254,7 @@ class CameraState():
        with self.current_frame_lock:
            frame_copy = np.copy(self._current_frame)
            frame_time = self.current_frame_time
-            tracked_objects = copy.deepcopy(self.tracked_objects)
+            tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}

        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
        # draw on the frame
@@ -89,149 +275,113 @@ class CameraState():
                region = obj['region']
                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)

-            if self.config['snapshots']['show_timestamp']:
+            if self.camera_config.snapshots.show_timestamp:
                time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
                cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)

-            if self.config['snapshots']['draw_zones']:
+            if self.camera_config.snapshots.draw_zones:
-                for name, zone in self.config['zones'].items():
+                for name, zone in self.camera_config.zones.items():
-                    thickness = 8 if any([name in obj['zones'] for obj in tracked_objects.values()]) else 2
+                    thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
-                    cv2.drawContours(frame_copy, [zone['contour']], -1, zone['color'], thickness)
+                    cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)

        return frame_copy

-    def false_positive(self, obj):
+    def finished(self, obj_id):
-        # once a true positive, always a true positive
+        del self.tracked_objects[obj_id]
-        if not obj.get('false_positive', True):
-            return False
-
-        threshold = self.config['objects'].get('filters', {}).get(obj['label'], {}).get('threshold', 0.85)
-        if obj['computed_score'] < threshold:
-            return True
-        return False
-
-    def compute_score(self, obj):
-        scores = obj['score_history'][:]
-        # pad with zeros if you dont have at least 3 scores
-        if len(scores) < 3:
-            scores += [0.0]*(3 - len(scores))
-        return median(scores)

    def on(self, event_type: str, callback: Callable[[Dict], None]):
        self.callbacks[event_type].append(callback)

-    def update(self, frame_time, tracked_objects):
+    def update(self, frame_time, current_detections):
        self.current_frame_time = frame_time
-        # get the new frame and delete the old frame
+        # get the new frame
        frame_id = f"{self.name}{frame_time}"
-        current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
+        current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)

-        current_ids = tracked_objects.keys()
+        current_ids = current_detections.keys()
        previous_ids = self.tracked_objects.keys()
        removed_ids = list(set(previous_ids).difference(current_ids))
        new_ids = list(set(current_ids).difference(previous_ids))
        updated_ids = list(set(current_ids).intersection(previous_ids))

        for id in new_ids:
-            self.tracked_objects[id] = tracked_objects[id]
+            new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])
-            self.tracked_objects[id]['zones'] = []
-
-            # start the score history
-            self.tracked_objects[id]['score_history'] = [self.tracked_objects[id]['score']]
-
-            # calculate if this is a false positive
-            self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])
-            self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])

            # call event handlers
            for c in self.callbacks['start']:
-                c(self.name, tracked_objects[id])
+                c(self.name, new_obj, frame_time)

        for id in updated_ids:
-            self.tracked_objects[id].update(tracked_objects[id])
+            updated_obj = self.tracked_objects[id]
+            updated_obj.update(frame_time, current_detections[id])

-            # if the object is not in the current frame, add a 0.0 to the score history
+            if (not updated_obj.false_positive
-            if self.tracked_objects[id]['frame_time'] != self.current_frame_time:
+                and updated_obj.thumbnail_data['frame_time'] == frame_time
-                self.tracked_objects[id]['score_history'].append(0.0)
+                and frame_time not in self.frame_cache):
-            else:
+                self.frame_cache[frame_time] = np.copy(current_frame)
-                self.tracked_objects[id]['score_history'].append(self.tracked_objects[id]['score'])
-            # only keep the last 10 scores
-            if len(self.tracked_objects[id]['score_history']) > 10:
-                self.tracked_objects[id]['score_history'] = self.tracked_objects[id]['score_history'][-10:]
-
-            # calculate if this is a false positive
-            self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])
-            self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])

            # call event handlers
            for c in self.callbacks['update']:
-                c(self.name, self.tracked_objects[id])
+                c(self.name, updated_obj, frame_time)

        for id in removed_ids:
            # publish events to mqtt
-            self.tracked_objects[id]['end_time'] = frame_time
+            removed_obj = self.tracked_objects[id]
-            for c in self.callbacks['end']:
+            if not 'end_time' in removed_obj.obj_data:
-                c(self.name, self.tracked_objects[id])
+                removed_obj.obj_data['end_time'] = frame_time
-            del self.tracked_objects[id]
+            for c in self.callbacks['end']:
+                c(self.name, removed_obj, frame_time)

-        # check to see if the objects are in any zones
-        for obj in self.tracked_objects.values():
-            current_zones = []
-            bottom_center = (obj['centroid'][0], obj['box'][3])
-            # check each zone
-            for name, zone in self.config['zones'].items():
-                contour = zone['contour']
-                # check if the object is in the zone and not filtered
-                if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0
-                    and not zone_filtered(obj, zone.get('filters', {}))):
-                    current_zones.append(name)
-            obj['zones'] = current_zones

+        # TODO: can i switch to looking this up and only changing when an event ends?
        # maintain best objects
        for obj in self.tracked_objects.values():
-            object_type = obj['label']
+            object_type = obj.obj_data['label']
-            # if the object wasn't seen on the current frame, skip it
+            # if the object's thumbnail is not from the current frame
-            if obj['frame_time'] != self.current_frame_time or obj['false_positive']:
+            if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
                continue
-            obj_copy = copy.deepcopy(obj)
            if object_type in self.best_objects:
                current_best = self.best_objects[object_type]
                now = datetime.datetime.now().timestamp()
                # if the object is a higher score than the current best score
                # or the current object is older than desired, use the new object
-                if obj_copy['score'] > current_best['score'] or (now - current_best['frame_time']) > self.config.get('best_image_timeout', 60):
+                if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
-                    obj_copy['frame'] = np.copy(current_frame)
+                    or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
-                    self.best_objects[object_type] = obj_copy
+                    self.best_objects[object_type] = obj
                    for c in self.callbacks['snapshot']:
-                        c(self.name, self.best_objects[object_type])
+                        c(self.name, self.best_objects[object_type], frame_time)
            else:
-                obj_copy['frame'] = np.copy(current_frame)
+                self.best_objects[object_type] = obj
-                self.best_objects[object_type] = obj_copy
                for c in self.callbacks['snapshot']:
-                    c(self.name, self.best_objects[object_type])
+                    c(self.name, self.best_objects[object_type], frame_time)

        # update overall camera state for each object type
        obj_counter = Counter()
        for obj in self.tracked_objects.values():
-            if not obj['false_positive']:
+            if not obj.false_positive:
-                obj_counter[obj['label']] += 1
+                obj_counter[obj.obj_data['label']] += 1

        # report on detected objects
        for obj_name, count in obj_counter.items():
-            new_status = 'ON' if count > 0 else 'OFF'
+            if count != self.object_counts[obj_name]:
-            if new_status != self.object_status[obj_name]:
+                self.object_counts[obj_name] = count
-                self.object_status[obj_name] = new_status
                for c in self.callbacks['object_status']:
-                    c(self.name, obj_name, new_status)
+                    c(self.name, obj_name, count)

-        # expire any objects that are ON and no longer detected
+        # expire any objects that are >0 and no longer detected
-        expired_objects = [obj_name for obj_name, status in self.object_status.items() if status == 'ON' and not obj_name in obj_counter]
+        expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and not obj_name in obj_counter]
        for obj_name in expired_objects:
-            self.object_status[obj_name] = 'OFF'
+            self.object_counts[obj_name] = 0
            for c in self.callbacks['object_status']:
-                c(self.name, obj_name, 'OFF')
+                c(self.name, obj_name, 0)
            for c in self.callbacks['snapshot']:
-                c(self.name, self.best_objects[obj_name])
+                c(self.name, self.best_objects[obj_name], frame_time)

+        # cleanup thumbnail frame cache
+        current_thumb_frames = set([obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive])
+        current_best_frames = set([obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()])
+        thumb_frames_to_delete = [t for t in self.frame_cache.keys() if not t in current_thumb_frames and not t in current_best_frames]
+        for t in thumb_frames_to_delete:
+            del self.frame_cache[t]

        with self.current_frame_lock:
            self._current_frame = current_frame
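A behavioral change worth noting in this hunk: the object_status callback now receives a count instead of the old 'ON'/'OFF' string, so the <topic_prefix>/<camera>/<object> topics carry numbers. A hedged sketch of a consumer that derives the old binary state from the new payload (the broker address, topic prefix and camera/label names are assumptions):

import paho.mqtt.client as mqtt

def on_object_count(client, userdata, msg):
    # payload is now a count such as b'2', not b'ON'/b'OFF'
    count = int(msg.payload)
    print(msg.topic, 'ON' if count > 0 else 'OFF', count)

client = mqtt.Client()
client.on_message = on_object_count
client.connect('mqtt.local', 1883, 60)
client.subscribe('frigate/back/person')
client.loop_forever()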
@@ -240,51 +390,41 @@ class CameraState():
        self.previous_frame_id = frame_id

class TrackedObjectProcessor(threading.Thread):
-    def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
+    def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
        threading.Thread.__init__(self)
-        self.camera_config = camera_config
+        self.name = "detected_frames_processor"
+        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
+        self.event_processed_queue = event_processed_queue
        self.stop_event = stop_event
        self.camera_states: Dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()

-        def start(camera, obj):
+        def start(camera, obj: TrackedObject, current_frame_time):
-            # publish events to mqtt
+            self.event_queue.put(('start', camera, obj.to_dict()))
-            self.client.publish(f"{self.topic_prefix}/{camera}/events/start", json.dumps(obj), retain=False)
-            self.event_queue.put(('start', camera, obj))

-        def update(camera, obj):
+        def update(camera, obj: TrackedObject, current_frame_time):
-            pass
+            if not obj.thumbnail_data is None and obj.thumbnail_data['frame_time'] == current_frame_time:
+                message = { 'before': obj.previous, 'after': obj.to_dict() }
+                self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)

-        def end(camera, obj):
+        def end(camera, obj: TrackedObject, current_frame_time):
-            self.client.publish(f"{self.topic_prefix}/{camera}/events/end", json.dumps(obj), retain=False)
+            if not obj.false_positive:
-            self.event_queue.put(('end', camera, obj))
+                message = { 'before': obj.previous, 'after': obj.to_dict() }
+                self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+            self.event_queue.put(('end', camera, obj.to_dict(include_thumbnail=True)))

-        def snapshot(camera, obj):
+        def snapshot(camera, obj: TrackedObject, current_frame_time):
-            if not 'frame' in obj:
+            self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", obj.get_jpg_bytes(), retain=True)
-                return
-            best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
-            mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
-            if mqtt_config.get('crop_to_region'):
-                region = obj['region']
-                best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
-            if 'snapshot_height' in mqtt_config:
-                height = int(mqtt_config['snapshot_height'])
-                width = int(height*best_frame.shape[1]/best_frame.shape[0])
-                best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-            ret, jpg = cv2.imencode('.jpg', best_frame)
-            if ret:
-                jpg_bytes = jpg.tobytes()
-                self.client.publish(f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot", jpg_bytes, retain=True)

        def object_status(camera, object_name, status):
            self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)

-        for camera in self.camera_config.keys():
+        for camera in self.config.cameras.keys():
-            camera_state = CameraState(camera, self.camera_config[camera], self.frame_manager)
+            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state.on('start', start)
            camera_state.on('update', update)
            camera_state.on('end', end)
@@ -292,45 +432,24 @@ class TrackedObjectProcessor(threading.Thread):
            camera_state.on('object_status', object_status)
            self.camera_states[camera] = camera_state

-        self.camera_data = defaultdict(lambda: {
-            'best_objects': {},
-            'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
-            'tracked_objects': {},
-            'current_frame': np.zeros((720,1280,3), np.uint8),
-            'current_frame_time': 0.0,
-            'object_id': None
-        })
        # {
        #   'zone_name': {
-        #     'person': ['camera_1', 'camera_2']
+        #     'person': {
+        #       'camera_1': 2,
+        #       'camera_2': 1
+        #     }
        #   }
        # }
-        self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))
+        self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))

-        # set colors for zones
-        all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])
-        zone_colors = {}
-        colors = plt.cm.get_cmap('tab10', len(all_zone_names))
-        for i, zone in enumerate(all_zone_names):
-            zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
-
-        # create zone contours
-        for camera_config in self.camera_config.values():
-            for zone_name, zone_config in camera_config['zones'].items():
-                zone_config['color'] = zone_colors[zone_name]
-                coordinates = zone_config['coordinates']
-                if isinstance(coordinates, list):
-                    zone_config['contour'] = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
-                elif isinstance(coordinates, str):
-                    points = coordinates.split(',')
-                    zone_config['contour'] = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
-                else:
-                    print(f"Unable to parse zone coordinates for {zone_name} - {camera}")

    def get_best(self, camera, label):
-        best_objects = self.camera_states[camera].best_objects
+        # TODO: need a lock here
-        if label in best_objects:
+        camera_state = self.camera_states[camera]
-            return best_objects[label]
+        if label in camera_state.best_objects:
+            best_obj = camera_state.best_objects[label]
+            best = best_obj.to_dict()
+            best['frame'] = camera_state.frame_cache[best_obj.thumbnail_data['frame_time']]
+            return best
        else:
            return {}
@@ -340,7 +459,7 @@ class TrackedObjectProcessor(threading.Thread):
    def run(self):
        while True:
            if self.stop_event.is_set():
-                print(f"Exiting object processor...")
+                logger.info(f"Exiting object processor...")
                break

            try:
@@ -352,23 +471,32 @@ class TrackedObjectProcessor(threading.Thread):
            camera_state.update(frame_time, current_tracked_objects)

-            # update zone status for each label
+            # update zone counts for each label
-            for zone in camera_state.config['zones'].keys():
+            # for each zone in the current camera
-                # get labels for current camera and all labels in current zone
+            for zone in self.config.cameras[camera].zones.keys():
-                labels_for_camera = set([obj['label'] for obj in camera_state.tracked_objects.values() if zone in obj['zones'] and not obj['false_positive']])
+                # count labels for the camera in the zone
-                labels_to_check = labels_for_camera | set(self.zone_data[zone].keys())
+                obj_counter = Counter()
-                # for each label in zone
+                for obj in camera_state.tracked_objects.values():
-                for label in labels_to_check:
+                    if zone in obj.current_zones and not obj.false_positive:
-                    camera_list = self.zone_data[zone][label]
+                        obj_counter[obj.obj_data['label']] += 1
-                    # remove or add the camera to the list for the current label
-                    previous_state = len(camera_list) > 0
+                # update counts and publish status
-                    if label in labels_for_camera:
+                for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
-                        camera_list.add(camera_state.name)
+                    # if we have previously published a count for this zone/label
-                    elif camera_state.name in camera_list:
+                    zone_label = self.zone_data[zone][label]
-                        camera_list.remove(camera_state.name)
+                    if camera in zone_label:
-                    new_state = len(camera_list) > 0
+                        current_count = sum(zone_label.values())
-                    # if the value is changing, send over MQTT
+                        zone_label[camera] = obj_counter[label] if label in obj_counter else 0
-                    if previous_state == False and new_state == True:
+                        new_count = sum(zone_label.values())
-                        self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'ON', retain=False)
+                        if new_count != current_count:
-                    elif previous_state == True and new_state == False:
+                            self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
-                        self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'OFF', retain=False)
+                    # if this is a new zone/label combo for this camera
+                    else:
+                        if label in obj_counter:
+                            zone_label[camera] = obj_counter[label]
+                            self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)

+            # cleanup event finished queue
+            while not self.event_processed_queue.empty():
+                event_id, camera = self.event_processed_queue.get()
+                self.camera_states[camera].finished(event_id)
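The zone handling above keeps per-camera counts for each zone/label pair and publishes to <topic_prefix>/<zone>/<label> only when the summed total changes. The aggregation itself is just a nested dict of counts; a tiny illustration of the same arithmetic (zone and camera names are made up):

from collections import defaultdict

# {zone: {label: {camera: count}}}, mirroring self.zone_data above
zone_data = defaultdict(lambda: defaultdict(dict))

zone_data['driveway']['person']['front_cam'] = 2
zone_data['driveway']['person']['side_cam'] = 1

# the published value is the sum across cameras for that zone/label
print(sum(zone_data['driveway']['person'].values()))  # 3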
@@ -1,16 +1,19 @@
-import time
-import datetime
-import threading
-import cv2
-import itertools
import copy
-import numpy as np
+import datetime
+import itertools
+import multiprocessing as mp
import random
import string
-import multiprocessing as mp
+import threading
+import time
from collections import defaultdict

+import cv2
+import numpy as np
from scipy.spatial import distance as dist
-from frigate.util import draw_box_with_label, calculate_region
+from frigate.util import calculate_region, draw_box_with_label


class ObjectTracker():
    def __init__(self, max_disappeared):
@@ -23,7 +26,6 @@ class ObjectTracker():
        id = f"{obj['frame_time']}-{rand_id}"
        obj['id'] = id
        obj['start_time'] = obj['frame_time']
-        obj['top_score'] = obj['score']
        self.tracked_objects[id] = obj
        self.disappeared[id] = 0

@@ -34,8 +36,6 @@ class ObjectTracker():
    def update(self, id, new_obj):
        self.disappeared[id] = 0
        self.tracked_objects[id].update(new_obj)
-        if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
-            self.tracked_objects[id]['top_score'] = self.tracked_objects[id]['score']

    def match_and_update(self, frame_time, new_objects):
        # group by name
125
frigate/record.py
Normal file
@@ -0,0 +1,125 @@
import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path

import psutil

from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR

logger = logging.getLogger(__name__)

SECONDS_IN_DAY = 60 * 60 * 24

def remove_empty_directories(directory):
    # list all directories recursively and sort them by path,
    # longest first
    paths = sorted(
        [x[0] for x in os.walk(RECORD_DIR)],
        key=lambda p: len(str(p)),
        reverse=True,
    )
    for path in paths:
        # don't delete the parent
        if path == RECORD_DIR:
            continue
        if len(os.listdir(path)) == 0:
            os.rmdir(path)

class RecordingMaintainer(threading.Thread):
    def __init__(self, config: FrigateConfig, stop_event):
        threading.Thread.__init__(self)
        self.name = 'recording_maint'
        self.config = config
        self.stop_event = stop_event

    def move_files(self):
        recordings = [d for d in os.listdir(RECORD_DIR) if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")]

        files_in_use = []
        for process in psutil.process_iter():
            if process.name() != 'ffmpeg':
                continue
            try:
                flist = process.open_files()
                if flist:
                    for nt in flist:
                        if nt.path.startswith(RECORD_DIR):
                            files_in_use.append(nt.path.split('/')[-1])
            except:
                continue

        for f in recordings:
            if f in files_in_use:
                continue

            camera = '-'.join(f.split('-')[:-1])
            start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')

            ffprobe_cmd = " ".join([
                'ffprobe',
                '-v',
                'error',
                '-show_entries',
                'format=duration',
                '-of',
                'default=noprint_wrappers=1:nokey=1',
                f"{os.path.join(RECORD_DIR,f)}"
            ])
            p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
            (output, err) = p.communicate()
            p_status = p.wait()
            if p_status == 0:
                duration = float(output.decode('utf-8').strip())
            else:
                logger.info(f"bad file: {f}")
                os.remove(os.path.join(RECORD_DIR,f))
                continue

            directory = os.path.join(RECORD_DIR, start_time.strftime('%Y-%m/%d/%H'), camera)

            if not os.path.exists(directory):
                os.makedirs(directory)

            file_name = f"{start_time.strftime('%M.%S.mp4')}"

            os.rename(os.path.join(RECORD_DIR,f), os.path.join(directory,file_name))

    def expire_files(self):
        delete_before = {}
        for name, camera in self.config.cameras.items():
            delete_before[name] = datetime.datetime.now().timestamp() - SECONDS_IN_DAY*camera.record.retain_days

        for p in Path('/media/frigate/recordings').rglob("*.mp4"):
            if not p.parent in delete_before:
                continue
            if p.stat().st_mtime < delete_before[p.parent]:
                p.unlink(missing_ok=True)

    def run(self):
        counter = 0
        self.expire_files()
        while(True):
            if self.stop_event.is_set():
                logger.info(f"Exiting recording maintenance...")
                break

            # only expire events every 10 minutes, but check for new files every 10 seconds
            time.sleep(10)
            counter = counter + 1
            if counter > 60:
                self.expire_files()
                remove_empty_directories(RECORD_DIR)
                counter = 0

            self.move_files()
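RecordingMaintainer is an ordinary thread driven by a multiprocessing event, expiring old recordings and moving finished mp4 segments into dated folders. A rough sketch of starting and stopping it (loading the config from a YAML file is an assumption; inside the application the parsed FrigateConfig is passed in directly):

import multiprocessing as mp

import yaml

from frigate.config import FrigateConfig
from frigate.record import RecordingMaintainer

# assumption: a config file with per-camera record settings at this path
with open('/config/config.yml') as f:
    config = FrigateConfig(config=yaml.safe_load(f))

stop_event = mp.Event()
maintainer = RecordingMaintainer(config, stop_event)
maintainer.start()

# ... on shutdown; the run loop checks the event roughly every 10 seconds
stop_event.set()
maintainer.join()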
0
frigate/test/__init__.py
Normal file
319
frigate/test/test_config.py
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
import json
|
||||||
|
from unittest import TestCase, main
|
||||||
|
import voluptuous as vol
|
||||||
|
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
|
||||||
|
|
||||||
|
class TestConfig(TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.minimal = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
'width': 1920
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
def test_empty(self):
|
||||||
|
FRIGATE_CONFIG_SCHEMA({})
|
||||||
|
|
||||||
|
def test_minimal(self):
|
||||||
|
FRIGATE_CONFIG_SCHEMA(self.minimal)
|
||||||
|
|
||||||
|
def test_config_class(self):
|
||||||
|
FrigateConfig(config=self.minimal)
|
||||||
|
|
||||||
|
def test_inherit_tracked_objects(self):
|
||||||
|
config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'objects': {
|
||||||
|
'track': ['person', 'dog']
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
'width': 1920
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
frigate_config = FrigateConfig(config=config)
|
||||||
|
assert('dog' in frigate_config.cameras['back'].objects.track)
|
||||||
|
|
||||||
|
def test_override_tracked_objects(self):
|
||||||
|
config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'objects': {
|
||||||
|
'track': ['person', 'dog']
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
'width': 1920,
|
||||||
|
'objects': {
|
||||||
|
'track': ['cat']
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
frigate_config = FrigateConfig(config=config)
|
||||||
|
assert('cat' in frigate_config.cameras['back'].objects.track)
|
||||||
|
|
||||||
|
def test_default_object_filters(self):
|
||||||
|
config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'objects': {
|
||||||
|
'track': ['person', 'dog']
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
'width': 1920
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
frigate_config = FrigateConfig(config=config)
|
||||||
|
assert('dog' in frigate_config.cameras['back'].objects.filters)
|
||||||
|
|
||||||
|
def test_inherit_object_filters(self):
|
||||||
|
config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'objects': {
|
||||||
|
'track': ['person', 'dog'],
|
||||||
|
'filters': {
|
||||||
|
'dog': {
|
||||||
|
'threshold': 0.7
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
'width': 1920
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
frigate_config = FrigateConfig(config=config)
|
||||||
|
assert('dog' in frigate_config.cameras['back'].objects.filters)
|
||||||
|
assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
|
||||||
|
|
||||||
|
def test_override_object_filters(self):
|
||||||
|
config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'back': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1080,
|
||||||
|
                    'width': 1920,
                    'objects': {
                        'track': ['person', 'dog'],
                        'filters': {
                            'dog': {
                                'threshold': 0.7
                            }
                        }
                    }
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('dog' in frigate_config.cameras['back'].objects.filters)
        assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)

    def test_ffmpeg_params(self):
        config = {
            'ffmpeg': {
                'input_args': ['-re']
            },
            'mqtt': {
                'host': 'mqtt'
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920,
                    'objects': {
                        'track': ['person', 'dog'],
                        'filters': {
                            'dog': {
                                'threshold': 0.7
                            }
                        }
                    }
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])

    def test_inherit_save_clips_retention(self):
        config = {
            'mqtt': {
                'host': 'mqtt'
            },
            'save_clips': {
                'retain': {
                    'default': 20,
                    'objects': {
                        'person': 30
                    }
                }
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert(frigate_config.cameras['back'].save_clips.retain.objects['person'] == 30)

    def test_roles_listed_twice_throws_error(self):
        config = {
            'mqtt': {
                'host': 'mqtt'
            },
            'save_clips': {
                'retain': {
                    'default': 20,
                    'objects': {
                        'person': 30
                    }
                }
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] },
                            { 'path': 'rtsp://10.0.0.1:554/video2', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_zone_matching_camera_name_throws_error(self):
        config = {
            'mqtt': {
                'host': 'mqtt'
            },
            'save_clips': {
                'retain': {
                    'default': 20,
                    'objects': {
                        'person': 30
                    }
                }
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920,
                    'zones': {
                        'back': {
                            'coordinates': '1,1,1,1,1,1'
                        }
                    }
                }
            }
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_save_clips_should_default_to_global_objects(self):
        config = {
            'mqtt': {
                'host': 'mqtt'
            },
            'save_clips': {
                'retain': {
                    'default': 20,
                    'objects': {
                        'person': 30
                    }
                }
            },
            'objects': {
                'track': ['person', 'dog']
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920,
                    'save_clips': {
                        'enabled': True
                    }
                }
            }
        }
        config = FrigateConfig(config=config)
        assert(len(config.cameras['back'].save_clips.objects) == 2)
        assert('dog' in config.cameras['back'].save_clips.objects)
        assert('person' in config.cameras['back'].save_clips.objects)

if __name__ == '__main__':
    main(verbosity=2)
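Taken together, these tests pin down how FrigateConfig merges global and per-camera settings and validates camera definitions. A minimal sketch (not part of the diff, and assuming FrigateConfig is importable from frigate.config, consistent with the frigate.config usage elsewhere in this compare) of the smallest camera config the tests above treat as valid:

from unittest import TestCase, main

from frigate.config import FrigateConfig  # import path assumed


class TestMinimalConfig(TestCase):
    def test_minimal_camera_config(self):
        # an mqtt host plus one camera with a single input in the 'detect' role
        # is the smallest shape the tests above accept
        config = {
            'mqtt': {'host': 'mqtt'},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            {'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect']}
                        ]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert 'back' in frigate_config.cameras


if __name__ == '__main__':
    main(verbosity=2)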
@@ -1,17 +1,21 @@
-from abc import ABC, abstractmethod
-import datetime
-import time
-import signal
-import traceback
 import collections
-import numpy as np
+import datetime
-import cv2
-import threading
-import matplotlib.pyplot as plt
 import hashlib
+import json
+import signal
+import subprocess as sp
+import threading
+import time
+import traceback
+from abc import ABC, abstractmethod
 from multiprocessing import shared_memory
 from typing import AnyStr

+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+
+
 def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
     if color is None:
         color = (0,0,255)
348  frigate/video.py
@@ -1,57 +1,34 @@
-import os
+import base64
-import time
-import datetime
-import cv2
-import queue
-import threading
-import ctypes
-import multiprocessing as mp
-import subprocess as sp
-import numpy as np
 import copy
+import ctypes
+import datetime
 import itertools
 import json
-import base64
-from typing import Dict, List
+import logging
+import multiprocessing as mp
+import os
+import queue
+import subprocess as sp
+import signal
+import threading
+import time
 from collections import defaultdict
-from frigate.util import draw_box_with_label, yuv_region_2_rgb, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, FrameManager, SharedMemoryFrameManager
-from frigate.objects import ObjectTracker
+from typing import Dict, List
+
+import cv2
+import numpy as np
+
+from frigate.config import CameraConfig
 from frigate.edgetpu import RemoteObjectDetector
+from frigate.log import LogPipe
 from frigate.motion import MotionDetector
+from frigate.objects import ObjectTracker
+from frigate.util import (EventsPerSecond, FrameManager,
+                          SharedMemoryFrameManager, area, calculate_region,
+                          clipped, draw_box_with_label, intersection,
+                          intersection_over_union, listen, yuv_region_2_rgb)
+
-def get_frame_shape(source):
-    ffprobe_cmd = " ".join([
-        'ffprobe',
-        '-v',
-        'panic',
-        '-show_error',
-        '-show_streams',
-        '-of',
-        'json',
-        '"'+source+'"'
-    ])
-    print(ffprobe_cmd)
-    p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
-    (output, err) = p.communicate()
-    p_status = p.wait()
-    info = json.loads(output)
-    print(info)
-
-    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
-
-    if video_info['height'] != 0 and video_info['width'] != 0:
-        return (video_info['height'], video_info['width'], 3)
-
-    # fallback to using opencv if ffprobe didnt succeed
-    video = cv2.VideoCapture(source)
-    ret, frame = video.read()
-    frame_shape = frame.shape
-    video.release()
-    return frame_shape
-
-def get_ffmpeg_input(ffmpeg_input):
-    frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
-    return ffmpeg_input.format(**frigate_vars)
+logger = logging.getLogger(__name__)

 def filtered(obj, objects_to_track, object_filters, mask=None):
     object_name = obj[0]
@@ -64,16 +41,16 @@ def filtered(obj, objects_to_track, object_filters, mask=None):

         # if the min area is larger than the
         # detected object, don't add it to detected objects
-        if obj_settings.get('min_area',-1) > obj[3]:
+        if obj_settings.min_area > obj[3]:
             return True

         # if the detected object is larger than the
         # max area, don't add it to detected objects
-        if obj_settings.get('max_area', 24000000) < obj[3]:
+        if obj_settings.max_area < obj[3]:
             return True

         # if the score is lower than the min_score, skip
-        if obj_settings.get('min_score', 0) > obj[1]:
+        if obj_settings.min_score > obj[1]:
             return True

         # compute the coordinates of the object and make sure
@@ -97,120 +74,190 @@ def create_tensor_input(frame, region):
     # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
     return np.expand_dims(cropped_frame, axis=0)

-def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
-    if not ffmpeg_process is None:
-        print("Terminating the existing ffmpeg process...")
-        ffmpeg_process.terminate()
-        try:
-            print("Waiting for ffmpeg to exit gracefully...")
-            ffmpeg_process.communicate(timeout=30)
-        except sp.TimeoutExpired:
-            print("FFmpeg didnt exit. Force killing...")
-            ffmpeg_process.kill()
-            ffmpeg_process.communicate()
-        ffmpeg_process = None
-
-    print("Creating ffmpeg process...")
-    print(" ".join(ffmpeg_cmd))
-    process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
+def stop_ffmpeg(ffmpeg_process, logger):
+    logger.info("Terminating the existing ffmpeg process...")
+    ffmpeg_process.terminate()
+    try:
+        logger.info("Waiting for ffmpeg to exit gracefully...")
+        ffmpeg_process.communicate(timeout=30)
+    except sp.TimeoutExpired:
+        logger.info("FFmpeg didnt exit. Force killing...")
+        ffmpeg_process.kill()
+        ffmpeg_process.communicate()
+    ffmpeg_process = None
+
+def start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None):
+    if not ffmpeg_process is None:
+        stop_ffmpeg(ffmpeg_process, logger)
+
+    if frame_size is None:
+        process = sp.Popen(ffmpeg_cmd, stdout = sp.DEVNULL, stderr=logpipe, stdin = sp.DEVNULL, start_new_session=True)
+    else:
+        process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stderr=logpipe, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
     return process

 def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
-    frame_queue, take_frame: int, fps:EventsPerSecond, skipped_fps: EventsPerSecond,
-    stop_event: mp.Event, current_frame: mp.Value):
+    frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):

-    frame_num = 0
-    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
-    skipped_fps.start()
+    frame_size = frame_shape[0] * frame_shape[1]
+    frame_rate = EventsPerSecond()
+    frame_rate.start()
+    skipped_eps = EventsPerSecond()
+    skipped_eps.start()
     while True:
-        if stop_event.is_set():
-            print(f"{camera_name}: stop event set. exiting capture thread...")
-            break
+        fps.value = frame_rate.eps()
+        skipped_fps = skipped_eps.eps()

-        frame_bytes = ffmpeg_process.stdout.read(frame_size)
         current_frame.value = datetime.datetime.now().timestamp()
+        frame_name = f"{camera_name}{current_frame.value}"
+        frame_buffer = frame_manager.create(frame_name, frame_size)
+        try:
+            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
+        except:
+            logger.info(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")

-        if len(frame_bytes) < frame_size:
-            print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
+            if ffmpeg_process.poll() != None:
+                logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
+                frame_manager.delete(frame_name)
+                break

-            if ffmpeg_process.poll() != None:
-                print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
-                break
-            else:
-                continue
+            continue

-        fps.update()
+        frame_rate.update()

-        frame_num += 1
-        if (frame_num % take_frame) != 0:
-            skipped_fps.update()
-            continue

         # if the queue is full, skip this frame
         if frame_queue.full():
-            skipped_fps.update()
+            skipped_eps.update()
+            frame_manager.delete(frame_name)
             continue

-        # put the frame in the frame manager
-        frame_buffer = frame_manager.create(f"{camera_name}{current_frame.value}", frame_size)
-        frame_buffer[:] = frame_bytes[:]
-        frame_manager.close(f"{camera_name}{current_frame.value}")
+        # close the frame
+        frame_manager.close(frame_name)

         # add to the queue
         frame_queue.put(current_frame.value)

-class CameraCapture(threading.Thread):
-    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, stop_event):
+class CameraWatchdog(threading.Thread):
+    def __init__(self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event):
         threading.Thread.__init__(self)
-        self.name = name
-        self.frame_shape = frame_shape
-        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
+        self.logger = logging.getLogger(f"watchdog.{camera_name}")
+        self.camera_name = camera_name
+        self.config = config
+        self.capture_thread = None
+        self.ffmpeg_detect_process = None
+        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+        self.ffmpeg_other_processes = []
+        self.camera_fps = camera_fps
+        self.ffmpeg_pid = ffmpeg_pid
+        self.frame_queue = frame_queue
+        self.frame_shape = self.config.frame_shape_yuv
+        self.frame_size = self.frame_shape[0] * self.frame_shape[1]
+        self.stop_event = stop_event

+    def run(self):
+        self.start_ffmpeg_detect()
+
+        for c in self.config.ffmpeg_cmds:
+            if 'detect' in c['roles']:
+                continue
+            logpipe = LogPipe(f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}", logging.ERROR)
+            self.ffmpeg_other_processes.append({
+                'cmd': c['cmd'],
+                'logpipe': logpipe,
+                'process': start_or_restart_ffmpeg(c['cmd'], self.logger, logpipe)
+            })
+
+        time.sleep(10)
+        while True:
+            if self.stop_event.is_set():
+                stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
+                for p in self.ffmpeg_other_processes:
+                    stop_ffmpeg(p['process'], self.logger)
+                    p['logpipe'].close()
+                self.logpipe.close()
+                break
+
+            now = datetime.datetime.now().timestamp()
+
+            if not self.capture_thread.is_alive():
+                self.start_ffmpeg_detect()
+            elif now - self.capture_thread.current_frame.value > 20:
+                self.logger.info(f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg...")
+                self.ffmpeg_detect_process.terminate()
+                try:
+                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
+                    self.ffmpeg_detect_process.communicate(timeout=30)
+                except sp.TimeoutExpired:
+                    self.logger.info("FFmpeg didnt exit. Force killing...")
+                    self.ffmpeg_detect_process.kill()
+                    self.ffmpeg_detect_process.communicate()
+
+            for p in self.ffmpeg_other_processes:
+                poll = p['process'].poll()
+                if poll == None:
+                    continue
+                p['process'] = start_or_restart_ffmpeg(p['cmd'], self.logger, p['logpipe'], ffmpeg_process=p['process'])
+
+            # wait a bit before checking again
+            time.sleep(10)
+
+    def start_ffmpeg_detect(self):
+        ffmpeg_cmd = [c['cmd'] for c in self.config.ffmpeg_cmds if 'detect' in c['roles']][0]
+        self.ffmpeg_detect_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.logger, self.logpipe, self.frame_size)
+        self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
+        self.capture_thread = CameraCapture(self.camera_name, self.ffmpeg_detect_process, self.frame_shape, self.frame_queue,
+            self.camera_fps)
+        self.capture_thread.start()
+
+class CameraCapture(threading.Thread):
+    def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
+        threading.Thread.__init__(self)
+        self.name = f"capture:{camera_name}"
+        self.camera_name = camera_name
+        self.frame_shape = frame_shape
         self.frame_queue = frame_queue
-        self.take_frame = take_frame
         self.fps = fps
         self.skipped_fps = EventsPerSecond()
         self.frame_manager = SharedMemoryFrameManager()
         self.ffmpeg_process = ffmpeg_process
         self.current_frame = mp.Value('d', 0.0)
         self.last_frame = 0
-        self.stop_event = stop_event

     def run(self):
         self.skipped_fps.start()
-        capture_frames(self.ffmpeg_process, self.name, self.frame_shape, self.frame_manager, self.frame_queue, self.take_frame,
-            self.fps, self.skipped_fps, self.stop_event, self.current_frame)
+        capture_frames(self.ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue,
+            self.fps, self.skipped_fps, self.current_frame)

-def track_camera(name, config, frame_queue, frame_shape, detection_queue, result_connection, detected_objects_queue, fps, detection_fps, read_start, detection_frame, stop_event):
-    print(f"Starting process for {name}: {os.getpid()}")
+def capture_camera(name, config: CameraConfig, process_info):
+    stop_event = mp.Event()
+    def receiveSignal(signalNumber, frame):
+        stop_event.set()
+
+    signal.signal(signal.SIGTERM, receiveSignal)
+    signal.signal(signal.SIGINT, receiveSignal)
+
+    frame_queue = process_info['frame_queue']
+    camera_watchdog = CameraWatchdog(name, config, frame_queue, process_info['camera_fps'], process_info['ffmpeg_pid'], stop_event)
+    camera_watchdog.start()
+    camera_watchdog.join()
+
+def track_camera(name, config: CameraConfig, detection_queue, result_connection, detected_objects_queue, process_info):
+    stop_event = mp.Event()
+    def receiveSignal(signalNumber, frame):
+        stop_event.set()
+
+    signal.signal(signal.SIGTERM, receiveSignal)
+    signal.signal(signal.SIGINT, receiveSignal)
+
+    threading.current_thread().name = f"process:{name}"
     listen()

-    detection_frame.value = 0.0
+    frame_queue = process_info['frame_queue']

-    # Merge the tracked object config with the global config
-    camera_objects_config = config.get('objects', {})
-    objects_to_track = camera_objects_config.get('track', [])
-    object_filters = camera_objects_config.get('filters', {})
+    frame_shape = config.frame_shape
+    objects_to_track = config.objects.track
+    object_filters = config.objects.filters
+    mask = config.mask

-    # load in the mask for object detection
-    if 'mask' in config:
-        if config['mask'].startswith('base64,'):
-            img = base64.b64decode(config['mask'][7:])
-            npimg = np.fromstring(img, dtype=np.uint8)
-            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
-        elif config['mask'].startswith('poly,'):
-            points = config['mask'].split(',')[1:]
-            contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
-            mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
-            mask[:] = 255
-            cv2.fillPoly(mask, pts=[contour], color=(0))
-        else:
-            mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
-    else:
-        mask = None
-
-    if mask is None or mask.size == 0:
-        mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
-        mask[:] = 255

     motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
     object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection)
@@ -220,9 +267,9 @@ def track_camera(name, config, frame_queue, frame_shape, detection_queue, result
     frame_manager = SharedMemoryFrameManager()

     process_frames(name, frame_queue, frame_shape, frame_manager, motion_detector, object_detector,
-        object_tracker, detected_objects_queue, fps, detection_fps, detection_frame, objects_to_track, object_filters, mask, stop_event)
+        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, mask, stop_event)

-    print(f"{name}: exiting subprocess")
+    logger.info(f"{name}: exiting subprocess")

 def reduce_boxes(boxes):
     if len(boxes) == 0:
@@ -256,35 +303,38 @@ def detect(object_detector, frame, region, objects_to_track, object_filters, mas
 def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
     frame_manager: FrameManager, motion_detector: MotionDetector,
     object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
-    detected_objects_queue: mp.Queue, fps: mp.Value, detection_fps: mp.Value, current_frame_time: mp.Value,
-    objects_to_track: List[str], object_filters: Dict, mask, stop_event: mp.Event,
+    detected_objects_queue: mp.Queue, process_info: Dict,
+    objects_to_track: List[str], object_filters, mask, stop_event,
     exit_on_empty: bool = False):

+    fps = process_info['process_fps']
+    detection_fps = process_info['detection_fps']
+    current_frame_time = process_info['detection_frame']

     fps_tracker = EventsPerSecond()
     fps_tracker.start()

     while True:
-        if stop_event.is_set() or (exit_on_empty and frame_queue.empty()):
-            print(f"Exiting track_objects...")
-            break
+        if stop_event.is_set():
+            break
+
+        if exit_on_empty and frame_queue.empty():
+            logger.info(f"Exiting track_objects...")
+            break

         try:
             frame_time = frame_queue.get(True, 10)
         except queue.Empty:
             continue

         current_frame_time.value = frame_time

         frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))

         if frame is None:
-            print(f"{camera_name}: frame {frame_time} is not in memory store.")
+            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

-        fps_tracker.update()
-        fps.value = fps_tracker.eps()

         # look for motion
         motion_boxes = motion_detector.detect(frame)

@@ -355,9 +405,13 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         # now that we have refined our detections, we need to track objects
         object_tracker.match_and_update(frame_time, detections)

-        # add to the queue
-        detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
-        detection_fps.value = object_detector.fps.eps()
-        frame_manager.close(f"{camera_name}{frame_time}")
+        # add to the queue if not full
+        if(detected_objects_queue.full()):
+            frame_manager.delete(f"{camera_name}{frame_time}")
+            continue
+        else:
+            fps_tracker.update()
+            fps.value = fps_tracker.eps()
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
+            detection_fps.value = object_detector.fps.eps()
+            frame_manager.close(f"{camera_name}{frame_time}")
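The refactor above routes ffmpeg's stderr through a LogPipe and makes frame_size optional, so the same helper can launch the non-detect role processes (which do not stream rawvideo to stdout). A rough sketch, under the signatures shown in this diff, of how a detect process gets (re)started; the ffmpeg command and logger names here are illustrative, since in Frigate they come from CameraConfig.ffmpeg_cmds and the camera name:

import logging

from frigate.log import LogPipe                    # import added in the new frigate/video.py
from frigate.video import start_or_restart_ffmpeg

logger = logging.getLogger("watchdog.back")
logpipe = LogPipe("ffmpeg.back.detect", logging.ERROR)

# illustrative command only; Frigate builds this from the camera's ffmpeg config
ffmpeg_cmd = ["ffmpeg", "-hide_banner", "-i", "rtsp://10.0.0.1:554/video",
              "-f", "rawvideo", "-pix_fmt", "yuv420p", "pipe:"]

# yuv420p frame size for a 1920x1080 detect stream, matching frame_shape_yuv above
frame_size = (1080 * 3 // 2) * 1920

proc = start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe, frame_size)

# passing the previous process makes the helper stop it gracefully before relaunching
proc = start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe, frame_size, ffmpeg_process=proc)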
36  frigate/watchdog.py  Normal file
@@ -0,0 +1,36 @@
import datetime
import logging
import threading
import time

logger = logging.getLogger(__name__)

class FrigateWatchdog(threading.Thread):
    def __init__(self, detectors, stop_event):
        threading.Thread.__init__(self)
        self.name = 'frigate_watchdog'
        self.detectors = detectors
        self.stop_event = stop_event

    def run(self):
        time.sleep(10)
        while True:
            # wait a bit before checking
            time.sleep(10)

            if self.stop_event.is_set():
                logger.info(f"Exiting watchdog...")
                break

            now = datetime.datetime.now().timestamp()

            # check the detection processes
            for detector in self.detectors.values():
                detection_start = detector.detection_start.value
                if (detection_start > 0.0 and
                    now - detection_start > 10):
                    logger.info("Detection appears to be stuck. Restarting detection process")
                    detector.start_or_restart()
                elif not detector.detect_process.is_alive():
                    logger.info("Detection appears to have stopped. Restarting detection process")
                    detector.start_or_restart()
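A minimal sketch of wiring this watchdog up, assuming only the interface the run loop reads above (a detection_start value, a detect_process with is_alive(), and start_or_restart()); the FakeDetector stand-in is purely illustrative:

import multiprocessing as mp
import threading
import time

from frigate.watchdog import FrigateWatchdog


class FakeDetector:
    """Illustrative stand-in exposing the attributes FrigateWatchdog reads."""
    def __init__(self):
        self.detection_start = mp.Value('d', 0.0)
        self.detect_process = threading.Thread(target=time.sleep, args=(60,))
        self.detect_process.start()

    def start_or_restart(self):
        print("restart requested")


stop_event = threading.Event()  # anything exposing is_set() works here
watchdog = FrigateWatchdog({'coral': FakeDetector()}, stop_event)
watchdog.start()

# ... on shutdown:
stop_event.set()
watchdog.join()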
58  frigate/zeroconf.py  Normal file
@@ -0,0 +1,58 @@
import logging
import socket

from zeroconf import (
    ServiceInfo,
    NonUniqueNameException,
    InterfaceChoice,
    IPVersion,
    Zeroconf,
)

logger = logging.getLogger(__name__)

ZEROCONF_TYPE = "_frigate._tcp.local."

# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> str:
    """Try to determine the local IP address of the machine."""
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        # Use Google Public DNS server to determine own IP
        sock.connect(("8.8.8.8", 80))

        return sock.getsockname()[0] # type: ignore
    except OSError:
        try:
            return socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            return "127.0.0.1"
    finally:
        sock.close()

def broadcast_zeroconf(frigate_id):
    zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)

    host_ip = get_local_ip()

    try:
        host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
    except OSError:
        host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)

    info = ServiceInfo(
        ZEROCONF_TYPE,
        name=f"{frigate_id}.{ZEROCONF_TYPE}",
        addresses=[host_ip_pton],
        port=5000,
    )

    logger.info("Starting Zeroconf broadcast")
    try:
        zeroconf.register_service(info)
    except NonUniqueNameException:
        logger.error(
            "Frigate instance with identical name present in the local network"
        )
    return zeroconf
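A minimal usage sketch for this new helper (the instance name is illustrative); broadcast_zeroconf returns the Zeroconf instance so the caller can clean up on shutdown:

from frigate.zeroconf import broadcast_zeroconf

zc = broadcast_zeroconf("frigate-dev")  # advertises _frigate._tcp.local. on port 5000

# ... run the app ...

zc.close()  # unregisters the service and releases the sockets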
122  nginx/nginx.conf  Normal file
@@ -0,0 +1,122 @@
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

load_module "modules/ngx_rtmp_module.so";

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;

    keepalive_timeout 65;

    upstream frigate_api {
        server localhost:5001;
        keepalive 1024;
    }

    server {
        listen 5000;

        location /stream/ {
            add_header 'Cache-Control' 'no-cache';
            add_header 'Access-Control-Allow-Origin' "$http_origin" always;
            add_header 'Access-Control-Allow-Credentials' 'true';
            add_header 'Access-Control-Expose-Headers' 'Content-Length';
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' "$http_origin";
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }

            types {
                application/dash+xml mpd;
                application/vnd.apple.mpegurl m3u8;
                video/mp2t ts;
                image/jpeg jpg;
            }

            root /tmp;
        }

        location /clips/ {
            add_header 'Access-Control-Allow-Origin' "$http_origin" always;
            add_header 'Access-Control-Allow-Credentials' 'true';
            add_header 'Access-Control-Expose-Headers' 'Content-Length';
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' "$http_origin";
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }

            types {
                video/mp4 mp4;
                image/jpeg jpg;
            }

            autoindex on;
            root /media/frigate;
        }

        location /recordings/ {
            add_header 'Access-Control-Allow-Origin' "$http_origin" always;
            add_header 'Access-Control-Allow-Credentials' 'true';
            add_header 'Access-Control-Expose-Headers' 'Content-Length';
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' "$http_origin";
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }

            types {
                video/mp4 mp4;
            }

            autoindex on;
            autoindex_format json;
            root /media/frigate;
        }

        location / {
            proxy_pass http://frigate_api/;
            proxy_pass_request_headers on;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}

rtmp {
    server {
        listen 1935;
        chunk_size 4096;
        allow publish 127.0.0.1;
        deny publish all;
        allow play all;
        application live {
            live on;
            record off;
            meta copy;
        }
    }
}
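Under this layout everything is reached through port 5000: location / proxies to the Frigate API process on localhost:5001, while /clips/ and /recordings/ are served directly from /media/frigate and RTMP restreaming listens on 1935. A quick sanity check of the proxy from the host, as a sketch (the root path is used because no other API routes are defined in this conf):

import urllib.request

# nginx listens on 5000 and forwards this request to the upstream on localhost:5001
with urllib.request.urlopen("http://localhost:5000/") as resp:
    print(resp.status, resp.headers.get("Content-Type"))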