forked from blakeblackshear/frigate
Compare commits
66 Commits
v0.12.0-be...v0.12.0-rc
| Author | SHA1 | Date |
| --- | --- | --- |
| | 7d589bd6e1 | |
| | 1bf3b83ef3 | |
| | b61b6f46cd | |
| | ac339d411c | |
| | 3f17f871fa | |
| | e454daf727 | |
| | 732e527401 | |
| | b44e6cd5dc | |
| | 2d9556f5f3 | |
| | e82f72a9d3 | |
| | ce2d589a28 | |
| | 750bf0e79a | |
| | 4dc6c93cdb | |
| | f7e9507bee | |
| | e8d8cc4f55 | |
| | c20c982ad0 | |
| | 962bdc7fa5 | |
| | a5e561c81d | |
| | c4ebafe777 | |
| | 7ed715b371 | |
| | 161e7b3fd7 | |
| | 42eaa13402 | |
| | 17c26c9fa9 | |
| | 318240c14c | |
| | 34bdf2fc10 | |
| | d97fa99ec5 | |
| | 9621b4b9a1 | |
| | 3611e874ca | |
| | fbf29667d4 | |
| | c13dd132ee | |
| | 3524d1a055 | |
| | a8c567d877 | |
| | 80135342c2 | |
| | c2b13fdbdf | |
| | 2797a60d4f | |
| | 0592c8b0e2 | |
| | 2b685ac343 | |
| | c901707670 | |
| | 27d3676ba5 | |
| | 52459bf348 | |
| | 6cfa73a284 | |
| | 7b26935462 | |
| | c9cd810c9f | |
| | 1715e2e09d | |
| | b69c0daadb | |
| | 56d2978bc8 | |
| | 1ef109e171 | |
| | 08ab9dedf7 | |
| | 3d90366af2 | |
| | c74c9ff161 | |
| | 27a31e731f | |
| | 562e2627c2 | |
| | babd976533 | |
| | 748815b6ce | |
| | 88252e0ae6 | |
| | c0bf69b4bf | |
| | b6b10e753f | |
| | 4a45089b95 | |
| | 3b9bcb356b | |
| | e10ddb343c | |
| | e8cd25ddf2 | |
| | 624c314335 | |
| | b33094207c | |
| | 7083a5c9b6 | |
| | db131d4971 | |
| | 74d6ab0555 | |
@@ -1,6 +1,6 @@
-name: EdgeTpu Support Request
-description: Support for setting up EdgeTPU in Frigate
-title: "[EdgeTPU Support]: "
+name: Detector Support Request
+description: Support for setting up object detector in Frigate (Coral, OpenVINO, TensorRT, etc.)
+title: "[Detector Support]: "
labels: ["support", "triage"]
assignees: []
body:
.github/workflows/ci.yml (vendored): 5 changes

@@ -19,6 +19,11 @@ jobs:
    runs-on: ubuntu-latest
    name: Image Build
    steps:
+      - name: Remove unnecessary files
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /opt/ghc
      - id: lowercaseRepo
        uses: ASzc/change-string-case-action@v5
        with:
@@ -27,7 +27,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM wget AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
-RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.1.1/go2rtc_linux_${TARGETARCH}" \
+RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.2.0/go2rtc_linux_${TARGETARCH}" \
    && chmod +x go2rtc
@@ -207,6 +207,10 @@ FROM deps AS devcontainer
# But start a fake service for simulating the logs
COPY docker/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run

+# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
+RUN mkdir -p /opt/frigate \
+    && ln -svf /workspace/frigate/frigate /opt/frigate/frigate
+
# Install Node 16
RUN apt-get update \
    && apt-get install wget -y \
@@ -64,6 +64,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    apt-get -qq install --no-install-recommends --no-install-suggests -y \
        intel-opencl-icd \
        mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools
+    # something about this dependency requires it to be installed in a separate call rather than in the line above
+    apt-get -qq install --no-install-recommends --no-install-suggests -y \
+        i965-va-driver-shaders
    rm -f /etc/apt/sources.list.d/debian-testing.list
fi
@@ -2,7 +2,7 @@

set -euxo pipefail

-s6_version="3.1.3.0"
+s6_version="3.1.4.1"

if [[ "${TARGETARCH}" == "amd64" ]]; then
    s6_arch="x86_64"
@@ -4,6 +4,8 @@

set -o errexit -o nounset -o pipefail

+# Logs should be sent to stdout so that s6 can collect them
+
declare exit_code_container
exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
readonly exit_code_container

@@ -11,20 +13,16 @@ readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="Frigate"

-echo "Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
+echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})"

if [[ "${exit_code_service}" -eq 256 ]]; then
    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
+        echo $((128 + exit_code_signal)) >/run/s6-linux-init-container-results/exitcode
    fi
elif [[ "${exit_code_service}" -ne 0 ]]; then
    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
+        echo "${exit_code_service}" >/run/s6-linux-init-container-results/exitcode
    fi
else
    # Exit code 0 is expected when Frigate is restarted by the user. In this case,
    # we create a signal for the go2rtc finish script to tolerate the restart.
    touch /dev/shm/restarting-frigate
fi

exec /run/s6/basedir/bin/halt
@@ -4,12 +4,14 @@

set -o errexit -o nounset -o pipefail

+# Logs should be sent to stdout so that s6 can collect them
+
# Tell S6-Overlay not to restart this service
s6-svc -O .

-echo "[INFO] Starting Frigate..." >&2
+echo "[INFO] Starting Frigate..."

-cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" >&2
+cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

# Replace the bash process with the Frigate process, redirecting stderr to stdout
exec 2>&1

docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish (new executable file): 12 changes
@@ -0,0 +1,12 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -o errexit -o nounset -o pipefail

# Logs should be sent to stdout so that s6 can collect them

readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="go2rtc-healthcheck"

echo "[INFO] The ${service} service exited with code ${exit_code_service} (by signal ${exit_code_signal})"

@@ -0,0 +1 @@
go2rtc-log

docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run (new executable file): 22 changes
@@ -0,0 +1,22 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the go2rtc-healthcheck service

set -o errexit -o nounset -o pipefail

# Logs should be sent to stdout so that s6 can collect them

# Give some additional time for go2rtc to start before start pinging
sleep 10s
echo "[INFO] Starting go2rtc healthcheck service..."

while sleep 30s; do
    # Check if the service is running
    if ! curl --connect-timeout 10 --fail --silent --show-error --output /dev/null http://127.0.0.1:1984/api/streams 2>&1; then
        echo "[ERROR] The go2rtc service is not responding to ping, restarting..."
        # We can also use -r instead of -t to send kill signal rather than term
        s6-svc -t /var/run/service/go2rtc 2>&1
        # Give some additional time to go2rtc to restart before start pinging again
        sleep 10s
    fi
done

@@ -0,0 +1 @@
5000

@@ -0,0 +1 @@
longrun

@@ -1 +1,2 @@
go2rtc
+go2rtc-healthcheck
@@ -1,32 +1,12 @@
#!/command/with-contenv bash
# shellcheck shell=bash
-# Take down the S6 supervision tree when the service exits

set -o errexit -o nounset -o pipefail

-declare exit_code_container
-exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
-readonly exit_code_container
+# Logs should be sent to stdout so that s6 can collect them

readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="go2rtc"

-echo "Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
-
-if [[ "${exit_code_service}" -eq 256 ]]; then
-    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
-    fi
-elif [[ "${exit_code_service}" -ne 0 ]]; then
-    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
-    fi
-else
-    # go2rtc is not supposed to exit, so even when it exits with 0 we make the
-    # container with 1. We only tolerate it when Frigate is restarting.
-    if [[ "${exit_code_container}" -eq 0 && ! -f /dev/shm/restarting-frigate ]]; then
-        echo "1" > /run/s6-linux-init-container-results/exitcode
-    fi
-fi
-
-exec /run/s6/basedir/bin/halt
+echo "[INFO] The ${service} service exited with code ${exit_code_service} (by signal ${exit_code_signal})"
@@ -4,8 +4,7 @@

set -o errexit -o nounset -o pipefail

-# Tell S6-Overlay not to restart this service
-s6-svc -O .
+# Logs should be sent to stdout so that s6 can collect them

function get_ip_and_port_from_supervisor() {
    local ip_address

@@ -19,9 +18,9 @@ function get_ip_and_port_from_supervisor() {
        jq --exit-status --raw-output '.data.ipv4.address[0]'
    ) && [[ "${ip_address}" =~ ${ip_regex} ]]; then
        ip_address="${BASH_REMATCH[1]}"
-        echo "[INFO] Got IP address from supervisor: ${ip_address}" >&2
+        echo "[INFO] Got IP address from supervisor: ${ip_address}"
    else
-        echo "[WARN] Failed to get IP address from supervisor" >&2
+        echo "[WARN] Failed to get IP address from supervisor"
        return 0
    fi

@@ -35,26 +34,28 @@ function get_ip_and_port_from_supervisor() {
        jq --exit-status --raw-output '.data.network["8555/tcp"]'
    ) && [[ "${webrtc_port}" =~ ${port_regex} ]]; then
        webrtc_port="${BASH_REMATCH[1]}"
-        echo "[INFO] Got WebRTC port from supervisor: ${ip_address}" >&2
+        echo "[INFO] Got WebRTC port from supervisor: ${webrtc_port}"
    else
-        echo "[WARN] Failed to get WebRTC port from supervisor" >&2
+        echo "[WARN] Failed to get WebRTC port from supervisor"
        return 0
    fi

    export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
}

-echo "[INFO] Preparing go2rtc config..." >&2
-
-if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
-    # Running as a Home Assistant add-on, infer the IP address and port
-    get_ip_and_port_from_supervisor
-fi
+if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
+    echo "[INFO] Preparing go2rtc config..."
+
+    if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
+        # Running as a Home Assistant add-on, infer the IP address and port
+        get_ip_and_port_from_supervisor
+    fi
+
+    python3 /usr/local/go2rtc/create_config.py
+fi

-raw_config=$(python3 /usr/local/go2rtc/create_config.py)
-
-echo "[INFO] Starting go2rtc..." >&2
+echo "[INFO] Starting go2rtc..."

# Replace the bash process with the go2rtc process, redirecting stderr to stdout
exec 2>&1
-exec go2rtc -config="${raw_config}"
+exec go2rtc -config=/dev/shm/go2rtc.yaml
@@ -4,6 +4,8 @@

set -o errexit -o nounset -o pipefail

+# Logs should be sent to stdout so that s6 can collect them
+
declare exit_code_container
exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
readonly exit_code_container

@@ -11,18 +13,18 @@ readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="NGINX"

-echo "Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
+echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})"

if [[ "${exit_code_service}" -eq 256 ]]; then
    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
+        echo $((128 + exit_code_signal)) >/run/s6-linux-init-container-results/exitcode
    fi
    if [[ "${exit_code_signal}" -eq 15 ]]; then
        exec /run/s6/basedir/bin/halt
    fi
elif [[ "${exit_code_service}" -ne 0 ]]; then
    if [[ "${exit_code_container}" -eq 0 ]]; then
-        echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
+        echo "${exit_code_service}" >/run/s6-linux-init-container-results/exitcode
    fi
    exec /run/s6/basedir/bin/halt
fi
@@ -4,7 +4,9 @@

set -o errexit -o nounset -o pipefail

-echo "[INFO] Starting NGINX..." >&2
+# Logs should be sent to stdout so that s6 can collect them
+
+echo "[INFO] Starting NGINX..."

# Replace the bash process with the NGINX process, redirecting stderr to stdout
exec 2>&1

@@ -1,5 +0,0 @@
-#!/command/with-contenv bash
-# shellcheck shell=bash
-
-exec 2>&1
-exec python3 -u -m frigate "${@}"
@@ -5,8 +5,13 @@ import os
import sys
import yaml

+sys.path.insert(0, "/opt/frigate")
+from frigate.const import BIRDSEYE_PIPE, BTBN_PATH
+from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
+
+sys.path.remove("/opt/frigate")

-BTBN_PATH = "/usr/lib/btbn-ffmpeg"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")

@@ -19,12 +24,18 @@ with open(config_file) as f:
    raw_config = f.read()

if config_file.endswith((".yaml", ".yml")):
-    config = yaml.safe_load(raw_config)
+    config: dict[str, any] = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
-    config = json.loads(raw_config)
+    config: dict[str, any] = json.loads(raw_config)

go2rtc_config: dict[str, any] = config.get("go2rtc", {})

+# Need to enable CORS for go2rtc so the frigate integration / card work automatically
+if go2rtc_config.get("api") is None:
+    go2rtc_config["api"] = {"origin": "*"}
+elif go2rtc_config["api"].get("origin") is None:
+    go2rtc_config["api"]["origin"] = "*"
+
# we want to ensure that logs are easy to read
if go2rtc_config.get("log") is None:
    go2rtc_config["log"] = {"format": "text"}

@@ -34,7 +45,9 @@ elif go2rtc_config["log"].get("format") is None:
if not go2rtc_config.get("webrtc", {}).get("candidates", []):
    default_candidates = []
    # use internal candidate if it was discovered when running through the add-on
-    internal_candidate = os.environ.get("FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None)
+    internal_candidate = os.environ.get(
+        "FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None
+    )
    if internal_candidate is not None:
        default_candidates.append(internal_candidate)
    # should set default stun server so webrtc can work

@@ -42,8 +55,10 @@ if not go2rtc_config.get("webrtc", {}).get("candidates", []):

    go2rtc_config["webrtc"] = {"candidates": default_candidates}
else:
-    print("[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually", file=sys.stderr)
+    print(
+        "[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually",
+    )

# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
# this means user does not need to specify audio codec when using restream
# as source for frigate and the integration supports HLS playback

@@ -62,14 +77,30 @@ if not os.path.exists(BTBN_PATH):
    go2rtc_config["ffmpeg"][
        "rtsp"
    ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"

for name in go2rtc_config.get("streams", {}):
    stream = go2rtc_config["streams"][name]

    if isinstance(stream, str):
-        go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(**FRIGATE_ENV_VARS)
+        go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(
+            **FRIGATE_ENV_VARS
+        )
    elif isinstance(stream, list):
        for i, stream in enumerate(stream):
            go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS)

-print(json.dumps(go2rtc_config))
+# add birdseye restream stream if enabled
+if config.get("birdseye", {}).get("restream", False):
+    birdseye: dict[str, any] = config.get("birdseye")
+
+    input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
+    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
+
+    if go2rtc_config.get("streams"):
+        go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
+    else:
+        go2rtc_config["streams"] = {"birdseye": ffmpeg_cmd}
+
+# Write go2rtc_config to /dev/shm/go2rtc.yaml
+with open("/dev/shm/go2rtc.yaml", "w") as f:
+    yaml.dump(go2rtc_config, f)
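For reference, when no `go2rtc` section is supplied in the Frigate config, the defaults injected by the script above would produce a generated config roughly like this sketch (stream entries and WebRTC candidates omitted, since those depend on the environment):

```yaml
# Sketch of /dev/shm/go2rtc.yaml with no user overrides (assumed, not verbatim output)
api:
  origin: "*" # CORS enabled so the Frigate integration / card work automatically
log:
  format: text # plain text logs are easier to read
```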
@@ -126,19 +126,17 @@ cameras:
          input_args: preset-rtsp-restream
          roles:
            - detect
    detect:
      width: 896
      height: 672
      fps: 7
```

### Unifi Protect Cameras

-In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp.
+Unifi protect cameras require the rtspx stream to be used with go2rtc https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-rtsp
+
+In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect.

```yaml
ffmpeg:
  output_args:
    record: preset-record-ubiquiti
-    rtmp: preset-rtmp-ubiquiti
+    rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead
```
@@ -101,7 +101,7 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor

OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html)

-An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector.
+An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.

```yaml
detectors:

@@ -119,6 +119,25 @@ model:
  labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```

+This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/index.md#full-configuration-reference) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
+
+```yaml
+detectors:
+  ov:
+    type: openvino
+    device: AUTO
+    model:
+      path: /path/to/yolox_tiny.xml
+
+model:
+  width: 416
+  height: 416
+  input_tensor: nchw
+  input_pixel_format: bgr
+  model_type: yolox
+  labelmap_path: /path/to/coco_80cl.txt
+```
+
### Intel NCS2 VPU and Myriad X Setup

Intel produces a neural net inference acceleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for acceleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.

@@ -212,6 +231,10 @@ yolov4x-mish-320
yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
+yolov7-640
+yolov7-320
+yolov7x-640
+yolov7x-320
```

### Configuration Parameters
@@ -28,16 +28,17 @@ Input args presets help make the config more readable and handle use cases for d

See [the camera specific docs](/configuration/camera_specific.md) for more info on non-standard cameras and recommendations for using them in Frigate.

-| Preset | Usage | Other Notes |
-| ------ | ----- | ----------- |
-| preset-http-jpeg-generic | HTTP Live Jpeg | Recommend restreaming live jpeg instead |
-| preset-http-mjpeg-generic | HTTP Mjpeg Stream | Recommend restreaming mjpeg stream instead |
-| preset-http-reolink | Reolink HTTP-FLV Stream | Only for reolink http, not when restreaming as rtsp |
-| preset-rtmp-generic | RTMP Stream | |
-| preset-rtsp-generic | RTSP Stream | This is the default when nothing is specified |
-| preset-rtsp-restream | RTSP Stream from restream | Use when using rtsp restream as source |
-| preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only |
-| preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris |
+| Preset | Usage | Other Notes |
+| ------ | ----- | ----------- |
+| preset-http-jpeg-generic | HTTP Live Jpeg | Recommend restreaming live jpeg instead |
+| preset-http-mjpeg-generic | HTTP Mjpeg Stream | Recommend restreaming mjpeg stream instead |
+| preset-http-reolink | Reolink HTTP-FLV Stream | Only for reolink http, not when restreaming as rtsp |
+| preset-rtmp-generic | RTMP Stream | |
+| preset-rtsp-generic | RTSP Stream | This is the default when nothing is specified |
+| preset-rtsp-restream | RTSP Stream from restream | Use for rtsp restream as source for frigate |
+| preset-rtsp-restream-low-latency | RTSP Stream from restream | Use for rtsp restream as source for frigate to lower latency, may cause issues with some cameras |
+| preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only |
+| preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris |

:::caution

@@ -46,21 +47,22 @@ It is important to be mindful of input args when using restream because you can

:::

```yaml
go2rtc:
  streams:
    reolink_cam: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password=password

cameras:
  reolink_cam:
    ffmpeg:
      inputs:
-        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
+        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password=password
          input_args: preset-http-reolink
          roles:
            - detect
-        - path: rtsp://192.168.0.10:8554/garage
+        - path: rtsp://127.0.0.1:8554/reolink_cam
          input_args: preset-rtsp-generic
          roles:
            - record
-        - path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
-          roles:
-            - restream
```

### Output Args Presets
@@ -15,23 +15,39 @@ ffmpeg:
  hwaccel_args: preset-rpi-64-h264
```

-### Intel-based CPUs (<10th Generation) via Quicksync
+### Intel-based CPUs (<10th Generation) via VAAPI

VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work.

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```

**NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the frigate.yml for HA OS users](advanced.md#environment_vars).
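For Docker Compose users, the driver override mentioned in the notice is just an environment variable on the Frigate container; a minimal sketch (service name assumed):

```yaml
services:
  frigate:
    environment:
      - LIBVA_DRIVER_NAME=i965 # force the i965 driver where the default iHD misbehaves
```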
### Intel-based CPUs (>=10th Generation) via Quicksync

QSV must be set specifically based on the video encoding of the stream.

#### H.264 streams

```yaml
ffmpeg:
  hwaccel_args: preset-intel-qsv-h264
```

#### H.265 streams

```yaml
ffmpeg:
  hwaccel_args: preset-intel-qsv-h265
```

### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver

VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

**Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container.

```yaml
@@ -3,7 +3,7 @@ id: index
title: Configuration File
---

-For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`) and named `frigate.yml`.
+For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yml` or `frigate.yaml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored.

For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
@@ -19,7 +19,6 @@ cameras:
      - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
        roles:
          - detect
-          - restream
    detect:
      width: 1280
      height: 720

@@ -37,6 +36,25 @@ It is not recommended to copy this full configuration file. Only specify values

:::

+**Note:** The following values will be replaced at runtime by using environment variables
+
+- `{FRIGATE_MQTT_USER}`
+- `{FRIGATE_MQTT_PASSWORD}`
+- `{FRIGATE_RTSP_USER}`
+- `{FRIGATE_RTSP_PASSWORD}`
+
+for example:
+
+```yaml
+mqtt:
+  user: "{FRIGATE_MQTT_USER}"
+  password: "{FRIGATE_MQTT_PASSWORD}"
+```
+
+```yaml
+- path: rtsp://{FRIGATE_RTSP_USER}:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:8554/unicast
+```

```yaml
mqtt:
  # Optional: Enable mqtt server (default: shown below)
@@ -105,6 +123,9 @@ model:
  # Optional: Object detection model input tensor format
  # Valid values are nhwc or nchw (default: shown below)
  input_tensor: nhwc
+  # Optional: Object detection model type, currently only used with the OpenVINO detector
+  # Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
+  model_type: ssd
  # Optional: Label name modifications. These are merged into the standard labelmap.
  labelmap:
    2: vehicle

@@ -146,7 +167,7 @@ birdseye:
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
  # Optional: global ffmpeg args (default: shown below)
-  global_args: -hide_banner -loglevel warning
+  global_args: -hide_banner -loglevel warning -threads 2
  # Optional: global hwaccel args (default: shown below)
  # NOTE: See hardware acceleration docs for your specific device
  hwaccel_args: []

@@ -155,7 +176,7 @@ ffmpeg:
  # Optional: global output args
  output_args:
    # Optional: output args for detect streams (default: shown below)
-    detect: -f rawvideo -pix_fmt yuv420p
+    detect: -threads 2 -f rawvideo -pix_fmt yuv420p
    # Optional: output args for record streams (default: shown below)
    record: preset-record-generic
    # Optional: output args for rtmp streams (default: shown below)

@@ -172,7 +193,6 @@ detect:
  # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
  fps: 5
  # Optional: enables detection for the camera (default: True)
-  # This value can be set via MQTT and will be updated in startup based on retained value
  enabled: True
  # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
  max_disappeared: 25

@@ -320,7 +340,6 @@ record:
# NOTE: Can be overridden at the camera level
snapshots:
  # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
-  # This value can be set via MQTT and will be updated in startup based on retained value
  enabled: False
  # Optional: save a clean PNG copy of the snapshot image (default: shown below)
  clean_copy: True

@@ -350,7 +369,7 @@ rtmp:
  enabled: False

# Optional: Restream configuration
-# Uses https://github.com/AlexxIT/go2rtc (v1.1.1)
+# Uses https://github.com/AlexxIT/go2rtc (v1.2.0)
go2rtc:

# Optional: jsmpeg stream configuration for WebUI
@@ -405,12 +424,12 @@ cameras:
      # Required: the path to the stream
      # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
      - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-        # Required: list of roles for this stream. valid values are: detect,record,restream,rtmp
-        # NOTICE: In addition to assigning the record, restream, and rtmp roles,
+        # Required: list of roles for this stream. valid values are: detect,record,rtmp
+        # NOTICE: In addition to assigning the record and rtmp roles,
        # they must also be enabled in the camera config.
        roles:
          - detect
-          - restream
          - record
          - rtmp
      # Optional: stream specific global args (default: inherit)
      # global_args:

@@ -485,7 +504,30 @@ ui:
  # Optional: Set a timezone to use in the UI (default: use browser local time)
  timezone: None
  # Optional: Use an experimental recordings / camera view UI (default: shown below)
-  experimental_ui: False
+  use_experimental: False
+  # Optional: Set the time format used.
+  # Options are browser, 12hour, or 24hour (default: shown below)
+  time_format: browser
+  # Optional: Set the date style for a specified length.
+  # Options are: full, long, medium, short
+  # Examples:
+  #   short: 2/11/23
+  #   medium: Feb 11, 2023
+  #   full: Saturday, February 11, 2023
+  # (default: shown below).
+  date_style: short
+  # Optional: Set the time style for a specified length.
+  # Options are: full, long, medium, short
+  # Examples:
+  #   short: 8:14 PM
+  #   medium: 8:15:22 PM
+  #   full: 8:15:22 PM Mountain Standard Time
+  # (default: shown below).
+  time_style: medium
+  # Optional: Ability to manually override the date / time styling to use strftime format
+  # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
+  # possible values are shown above (default: not set)
+  strftime_fmt: "%Y/%m/%d %H:%M"

# Optional: Telemetry configuration
telemetry:
@@ -59,7 +59,7 @@ cameras:
      roles:
        - detect
  live:
-    stream_name: test_cam_sub
+    stream_name: rtsp_cam_sub
```

### WebRTC extra configuration:

@@ -101,4 +101,4 @@ If you are having difficulties getting WebRTC to work and you are running Frigat

:::

-See https://github.com/AlexxIT/go2rtc#module-webrtc for more information about this.
+See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-webrtc) for more information about this.
@@ -7,7 +7,13 @@ title: Restream

Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
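For example, the restream can be consumed by Frigate itself to avoid a second connection to the camera; a minimal sketch, assuming a go2rtc stream named `back` (the Reolink example earlier in this compare follows the same pattern):

```yaml
go2rtc:
  streams:
    back: rtsp://user:password@10.0.10.10:554/stream # the actual camera URL

cameras:
  back:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/back # pull from the local restream instead of the camera
          input_args: preset-rtsp-restream
          roles:
            - detect
```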
-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc#configuration) for more advanced configurations and features.
+Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.2.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#configuration) for more advanced configurations and features.

:::note

You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams.

:::

### Birdseye Restream
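Judging from the `create_config.py` change earlier in this compare, birdseye restreaming is keyed off a `restream` flag in the birdseye config; a minimal sketch, assuming the 1280x720 defaults used by that script:

```yaml
birdseye:
  enabled: True
  restream: True # exposes the composite view at rtsp://<frigate_host>:8554/birdseye
  width: 1280 # matches the rawvideo -video_size default in create_config.py
  height: 720
```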
@@ -124,7 +130,7 @@ cameras:

## Advanced Restream Configurations

-The [exec](https://github.com/AlexxIT/go2rtc#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

NOTE: The output will need to be passed with two curly braces `{{output}}`
@@ -11,6 +11,24 @@ During testing, enable the Zones option for the debug feed so you can adjust as

To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead.

+### Restricting events to specific zones
+
+Often you will only want events to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to be notified when an object enters your entire_yard zone; the config would be:
+
+```yaml
+camera:
+  record:
+    events:
+      required_zones:
+        - entire_yard
+  snapshots:
+    required_zones:
+      - entire_yard
+  zones:
+    entire_yard:
+      coordinates: ...
+```
+
### Restricting zones to specific objects

Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars.
@@ -3,7 +3,7 @@ id: camera_setup
title: Camera setup
---

-Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but far less compatibility. Safari and Edge are the only browsers able to play H.265. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
+Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.

- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server.
@@ -80,10 +80,15 @@ The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below:

-| Name | Model | Inference Speed |
-| ---- | ----- | --------------- |
-| RTX 3050 | yolov4-tiny-416 | ~ 5 ms |
-| RTX 3050 | yolov7-tiny-416 | ~ 6 ms |
+| Name | Inference Speed |
+| ---- | --------------- |
+| GTX 1060 6GB | ~ 7 ms |
+| GTX 1070 | ~ 6 ms |
+| GTX 1660 SUPER | ~ 4 ms |
+| RTX 3050 | 5 - 7 ms |
+| RTX 3070 Mobile | ~ 5 ms |
+| Quadro P400 2GB | 20 - 25 ms |
+| Quadro P2000 | ~ 12 ms |

## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
@@ -163,7 +163,10 @@ docker run -d \

:::caution

-Due to limitations in Home Assistant Operating System, utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46).
+There are important limitations in Home Assistant Operating System to be aware of:
+
+- Utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46).
+- AMD GPUs are not supported because HA OS does not include the mesa driver.
+- Nvidia GPUs are not supported because addons do not support the nvidia runtime.

:::

@@ -194,6 +197,13 @@ There are several versions of the addon available:

## Home Assistant Supervised

+:::caution
+
+There are important limitations in Home Assistant Supervised to be aware of:
+
+- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
+
+:::
+
:::tip

If possible, it is recommended to run Frigate standalone in Docker and use [Frigate's Proxy Addon](https://github.com/blakeblackshear/frigate-hass-addons/blob/main/frigate_proxy/README.md).
@@ -10,7 +10,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

# Setup a go2rtc stream

-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-streams), not just rtsp.

```yaml
go2rtc:

@@ -23,7 +23,7 @@ The easiest live view to get working is MSE. After adding this to the config, re

### What if my video doesn't play?

-If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
+If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:

```yaml
go2rtc:
@@ -33,6 +33,15 @@ go2rtc:
    - "ffmpeg:back#video=h264"
```

+Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types.
+
+```yaml
+go2rtc:
+  streams:
+    back:
+      - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
+```
+
If you can see the video but do not have audio, this is most likely because your camera's audio stream is not AAC. If possible, update your camera's audio settings to AAC. If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:

```yaml
@@ -53,6 +62,15 @@ go2rtc:
    - "ffmpeg:back#video=h264#audio=aac"
```

+When using the ffmpeg module, you would add AAC audio like this:
+
+```yaml
+go2rtc:
+  streams:
+    back:
+      - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac"
+```
+
## Next steps

1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
@@ -16,6 +16,8 @@ See the [MQTT integration
documentation](https://www.home-assistant.io/integrations/mqtt/) for more
details.

+In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function.
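As context for that added note, enabling MQTT in Frigate is a small config block; a minimal sketch, with the host as a placeholder and credentials supplied via the runtime-substituted variables shown elsewhere in this compare:

```yaml
mqtt:
  # Point Frigate at the same broker Home Assistant uses (placeholder address)
  host: 192.168.1.10
  user: "{FRIGATE_MQTT_USER}"
  password: "{FRIGATE_MQTT_PASSWORD}"
```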
### Integration installation

Available via HACS as a default repository. To install:

@@ -30,7 +32,7 @@ Home Assistant > HACS > Integrations > "Explore & Add Integrations" > Frigate

- Then add/configure the integration:

```
-Home Assistant > Configuration > Integrations > Add Integration > Frigate
+Home Assistant > Settings > Devices & Services > Add Integration > Frigate
```

Note: You will also need
@@ -64,13 +66,13 @@ Home Assistant > Configuration > Integrations > Frigate > Options

| Option | Description |
| ------ | ----------- |
-| RTSP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTMP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTSP streams](#streams) below. |
+| RTSP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTSP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTSP streams](#streams) below. |

## Entities Provided

| Platform | Description |
| -------- | ----------- |
-| `camera` | Live camera stream (requires RTMP), camera for image of the last detected object. |
+| `camera` | Live camera stream (requires RTSP), camera for image of the last detected object. |
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
@@ -39,6 +39,12 @@ You cannot use the `environment_vars` section of your configuration file to set

Once your API key is configured, you can submit examples directly from the events page in Frigate using the `SEND TO FRIGATE+` button.

+:::note
+
+Snapshots must be enabled to be able to submit examples to Frigate+
+
+:::
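Per that note, snapshots are enabled in the Frigate config (globally or per camera); a minimal sketch using the option shown in the configuration reference earlier in this compare:

```yaml
snapshots:
  enabled: True # required before SEND TO FRIGATE+ can submit examples
```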


### Annotate and verify
@@ -17,16 +17,18 @@ ffmpeg:
    record: preset-record-generic-audio-aac
```

-### I can't view events or recordings in the Web UI.
-
-Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
-
-You can open `chrome://media-internals/` in another tab and then try to playback, the media internals page will give information about why playback is failing.
-
### My mjpeg stream or snapshots look green and crazy

This almost always means that the width/height defined for your camera are not correct. Double check the resolution with VLC or another player. Also make sure you don't have the width and height values backwards.



+### I can't view events or recordings in the Web UI.
+
+Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
+
### "[mov,mp4,m4a,3gp,3g2,mj2 @ 0x5639eeb6e140] moov atom not found"

These messages in the logs are expected in certain situations. Frigate checks the integrity of the recordings before storing. Occasionally these cached files will be invalid and cleaned up automatically.
@@ -28,7 +28,6 @@ from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.plus import PlusApi
from frigate.record import RecordingCleanup, RecordingMaintainer
-from frigate.restream import RestreamApi
from frigate.stats import StatsEmitter, stats_init
from frigate.storage import StorageMaintainer
from frigate.version import VERSION

@@ -173,18 +172,13 @@ class FrigateApp:
            self.plus_api,
        )

-    def init_restream(self) -> None:
-        self.restream = RestreamApi(self.config)
-        self.restream.add_cameras()
-
    def init_dispatcher(self) -> None:
        comms: list[Communicator] = []

        if self.config.mqtt.enabled:
            comms.append(MqttClient(self.config))

-        self.ws_client = WebSocketClient(self.config)
-        comms.append(self.ws_client)
+        comms.append(WebSocketClient(self.config))
        self.dispatcher = Dispatcher(self.config, self.camera_metrics, comms)

    def start_detectors(self) -> None:

@@ -382,7 +376,6 @@ class FrigateApp:
            print(e)
            self.log_process.terminate()
            sys.exit(1)
-        self.init_restream()
        self.start_detectors()
        self.start_video_output_processor()
        self.start_detected_frames_processor()

@@ -417,7 +410,17 @@ class FrigateApp:
        logger.info(f"Stopping...")
        self.stop_event.set()

-        self.ws_client.stop()
+        for detector in self.detectors.values():
+            detector.stop()
+
+        # Empty the detection queue and set the events for all requests
+        while not self.detection_queue.empty():
+            connection_id = self.detection_queue.get(timeout=1)
+            self.detection_out_events[connection_id].set()
+        self.detection_queue.close()
+        self.detection_queue.join_thread()
+
+        self.dispatcher.stop()
        self.detected_frames_processor.join()
        self.event_processor.join()
        self.event_cleanup.join()

@@ -427,10 +430,20 @@ class FrigateApp:
        self.frigate_watchdog.join()
        self.db.stop()

-        for detector in self.detectors.values():
-            detector.stop()
-
        while len(self.detection_shms) > 0:
            shm = self.detection_shms.pop()
            shm.close()
            shm.unlink()

+        for queue in [
+            self.event_queue,
+            self.event_processed_queue,
+            self.video_output_queue,
+            self.detected_frames_queue,
+            self.recordings_info_queue,
+            self.log_queue,
+        ]:
+            while not queue.empty():
+                queue.get_nowait()
+            queue.close()
+            queue.join_thread()
@@ -27,6 +27,11 @@ class Communicator(ABC):
        """Pass receiver so communicators can pass commands."""
        pass

+    @abstractmethod
+    def stop(self) -> None:
+        """Stop the communicator."""
+        pass
+

class Dispatcher:
    """Handle communication between Frigate and communicators."""

@@ -72,6 +77,10 @@ class Dispatcher:
        for comm in self.comms:
            comm.publish(topic, payload, retain)

+    def stop(self) -> None:
+        for comm in self.comms:
+            comm.stop()
+
    def _on_detect_command(self, camera_name: str, payload: str) -> None:
        """Callback for detect topic."""
        detect_settings = self.config.cameras[camera_name].detect
@@ -35,6 +35,9 @@ class MqttClient(Communicator):  # type: ignore[misc]
            f"{self.mqtt_config.topic_prefix}/{topic}", payload, retain=retain
        )

+    def stop(self) -> None:
+        self.client.disconnect()
+
    def _set_initial_topics(self) -> None:
        """Set initial state topics."""
        for camera_name, camera in self.config.cameras.items():

@@ -95,3 +95,4 @@ class WebSocketClient(Communicator):  # type: ignore[misc]
        self.websocket_server.manager.join()
        self.websocket_server.shutdown()
        self.websocket_thread.join()
+        logger.info("Exiting websocket client...")
@@ -66,12 +66,37 @@ class LiveModeEnum(str, Enum):
    webrtc = "webrtc"


class TimeFormatEnum(str, Enum):
    browser = "browser"
    hours12 = "12hour"
    hours24 = "24hour"


class DateTimeStyleEnum(str, Enum):
    full = "full"
    long = "long"
    medium = "medium"
    short = "short"


class UIConfig(FrigateBaseModel):
    live_mode: LiveModeEnum = Field(
        default=LiveModeEnum.mse, title="Default Live Mode."
    )
    timezone: Optional[str] = Field(title="Override UI timezone.")
    use_experimental: bool = Field(default=False, title="Experimental UI")
    time_format: TimeFormatEnum = Field(
        default=TimeFormatEnum.browser, title="Override UI time format."
    )
    date_style: DateTimeStyleEnum = Field(
        default=DateTimeStyleEnum.short, title="Override UI dateStyle."
    )
    time_style: DateTimeStyleEnum = Field(
        default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
    )
    strftime_fmt: Optional[str] = Field(
        default=None, title="Override date and time format using strftime syntax."
    )


class TelemetryConfig(FrigateBaseModel):
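For context, a minimal sketch of how the new UI options are expected to validate (assuming `UIConfig` is importable from `frigate.config`; values outside the enums are rejected by pydantic):

```python
from frigate.config import UIConfig  # assumed import path

# Enum-backed fields accept the raw strings used in config files.
ui = UIConfig(time_format="24hour", date_style="full", time_style="medium")
print(ui.time_format)   # TimeFormatEnum.hours24
print(ui.strftime_fmt)  # None unless explicitly overridden
```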
@@ -370,9 +395,18 @@ class BirdseyeCameraConfig(BaseModel):
    )


FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
# Note: Setting threads to less than 2 caused several issues with recording segments
# https://github.com/blakeblackshear/frigate/issues/5659
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"]
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
    "-threads",
    "2",
    "-f",
    "rawvideo",
    "-pix_fmt",
    "yuv420p",
]
RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-rtmp-generic"
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"

@@ -23,6 +23,13 @@ class InputTensorEnum(str, Enum):
    nhwc = "nhwc"


class ModelTypeEnum(str, Enum):
    ssd = "ssd"
    yolox = "yolox"
    yolov5 = "yolov5"
    yolov8 = "yolov8"


class ModelConfig(BaseModel):
    path: Optional[str] = Field(title="Custom Object detection model path.")
    labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
@@ -37,6 +44,9 @@ class ModelConfig(BaseModel):
    input_pixel_format: PixelFormatEnum = Field(
        default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
    )
    model_type: ModelTypeEnum = Field(
        default=ModelTypeEnum.ssd, title="Object Detection Model Type"
    )
    _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
    _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()

@@ -13,12 +13,19 @@ from .detector_config import BaseDetectorConfig

logger = logging.getLogger(__name__)

plugin_modules = [
    importlib.import_module(name)
    for finder, name, ispkg in pkgutil.iter_modules(
        plugins.__path__, plugins.__name__ + "."
    )
]

_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + ".")

plugin_modules = []

for _, name, _ in _included_modules:
    try:
        # currently openvino may fail when importing
        # on an arm device with 64 KiB page size.
        plugin_modules.append(importlib.import_module(name))
    except ImportError as e:
        logger.error(f"Error importing detector runtime: {e}")


api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}

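A hypothetical sketch of how `api_types` is then used to resolve a configured detector; the `create_detector` helper name here is illustrative and not part of this diff:

```python
def create_detector(detector_config):
    # api_types maps each DetectionApi subclass's type_key (e.g. "cpu",
    # "edgetpu", "openvino") to the class that survived the import loop above.
    api = api_types.get(detector_config.type)
    if not api:
        raise ValueError(f"Unknown detector type: {detector_config.type}")
    return api(detector_config)
```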
@@ -5,7 +5,11 @@ from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from typing import Literal
from pydantic import Extra, Field
import tflite_runtime.interpreter as tflite

try:
    from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
    from tensorflow.lite.python.interpreter import Interpreter


logger = logging.getLogger(__name__)
@@ -22,7 +26,7 @@ class CpuTfl(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: CpuDetectorConfig):
        self.interpreter = tflite.Interpreter(
        self.interpreter = Interpreter(
            model_path=detector_config.model.path or "/cpu_model.tflite",
            num_threads=detector_config.num_threads or 3,
        )

@@ -5,8 +5,11 @@ from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from typing import Literal
from pydantic import Extra, Field
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate

try:
    from tflite_runtime.interpreter import Interpreter, load_delegate
except ModuleNotFoundError:
    from tensorflow.lite.python.interpreter import Interpreter, load_delegate


logger = logging.getLogger(__name__)
@@ -33,7 +36,7 @@ class EdgeTpuTfl(DetectionApi):
        logger.info(f"Attempting to load TPU as {device_config['device']}")
        edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
        logger.info("TPU found")
        self.interpreter = tflite.Interpreter(
        self.interpreter = Interpreter(
            model_path=detector_config.model.path or "/edgetpu_model.tflite",
            experimental_delegates=[edge_tpu_delegate],
        )

@@ -3,7 +3,7 @@ import numpy as np
import openvino.runtime as ov

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from typing import Literal
from pydantic import Extra, Field

@@ -24,12 +24,18 @@ class OvDetector(DetectionApi):
    def __init__(self, detector_config: OvDetectorConfig):
        self.ov_core = ov.Core()
        self.ov_model = self.ov_core.read_model(detector_config.model.path)
        self.ov_model_type = detector_config.model.model_type

        self.h = detector_config.model.height
        self.w = detector_config.model.width

        self.interpreter = self.ov_core.compile_model(
            model=self.ov_model, device_name=detector_config.device
        )

        logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
        self.output_indexes = 0

        while True:
            try:
                tensor_shape = self.interpreter.output(self.output_indexes).shape
@@ -38,29 +44,131 @@ class OvDetector(DetectionApi):
            except:
                logger.info(f"Model has {self.output_indexes} Output Tensors")
                break
        if self.ov_model_type == ModelTypeEnum.yolox:
            self.num_classes = tensor_shape[2] - 5
            logger.info(f"YOLOX model has {self.num_classes} classes")
            self.set_strides_grids()

    def set_strides_grids(self):
        grids = []
        expanded_strides = []

        strides = [8, 16, 32]

        hsizes = [self.h // stride for stride in strides]
        wsizes = [self.w // stride for stride in strides]

        for hsize, wsize, stride in zip(hsizes, wsizes, strides):
            xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
            grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
            grids.append(grid)
            shape = grid.shape[:2]
            expanded_strides.append(np.full((*shape, 1), stride))
        self.grids = np.concatenate(grids, 1)
        self.expanded_strides = np.concatenate(expanded_strides, 1)

    ## Takes in class ID, confidence score, and array of [x, y, w, h] that describes detection position,
    ## returns an array that's easily passable back to Frigate.
    def process_yolo(self, class_id, conf, pos):
        return [
            class_id,  # class ID
            conf,  # confidence score
            (pos[1] - (pos[3] / 2)) / self.h,  # y_min
            (pos[0] - (pos[2] / 2)) / self.w,  # x_min
            (pos[1] + (pos[3] / 2)) / self.h,  # y_max
            (pos[0] + (pos[2] / 2)) / self.w,  # x_max
        ]

    def detect_raw(self, tensor_input):

        infer_request = self.interpreter.create_infer_request()
        infer_request.infer([tensor_input])

        results = infer_request.get_output_tensor()
        if self.ov_model_type == ModelTypeEnum.ssd:
            results = infer_request.get_output_tensor()

        detections = np.zeros((20, 6), np.float32)
        i = 0
        for object_detected in results.data[0, 0, :]:
            if object_detected[0] != -1:
                logger.debug(object_detected)
            if object_detected[2] < 0.1 or i == 20:
                break
            detections[i] = [
                object_detected[1],  # Label ID
                float(object_detected[2]),  # Confidence
                object_detected[4],  # y_min
                object_detected[3],  # x_min
                object_detected[6],  # y_max
                object_detected[5],  # x_max
            ]
            i += 1
            detections = np.zeros((20, 6), np.float32)
            i = 0
            for object_detected in results.data[0, 0, :]:
                if object_detected[0] != -1:
                    logger.debug(object_detected)
                if object_detected[2] < 0.1 or i == 20:
                    break
                detections[i] = [
                    object_detected[1],  # Label ID
                    float(object_detected[2]),  # Confidence
                    object_detected[4],  # y_min
                    object_detected[3],  # x_min
                    object_detected[6],  # y_max
                    object_detected[5],  # x_max
                ]
                i += 1
            return detections
        elif self.ov_model_type == ModelTypeEnum.yolox:
            out_tensor = infer_request.get_output_tensor()
            # [x, y, h, w, box_score, class_no_1, ..., class_no_80],
            results = out_tensor.data
            results[..., :2] = (results[..., :2] + self.grids) * self.expanded_strides
            results[..., 2:4] = np.exp(results[..., 2:4]) * self.expanded_strides
            image_pred = results[0, ...]

        return detections
            class_conf = np.max(
                image_pred[:, 5 : 5 + self.num_classes], axis=1, keepdims=True
            )
            class_pred = np.argmax(image_pred[:, 5 : 5 + self.num_classes], axis=1)
            class_pred = np.expand_dims(class_pred, axis=1)

            conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
            dets = dets[conf_mask]

            ordered = dets[dets[:, 5].argsort()[::-1]][:20]

            detections = np.zeros((20, 6), np.float32)

            for i, object_detected in enumerate(ordered):
                detections[i] = self.process_yolo(
                    object_detected[6], object_detected[5], object_detected[:4]
                )
            return detections
        elif self.ov_model_type == ModelTypeEnum.yolov8:
            out_tensor = infer_request.get_output_tensor()
            results = out_tensor.data[0]
            output_data = np.transpose(results)
            scores = np.max(output_data[:, 4:], axis=1)
            if len(scores) == 0:
                return np.zeros((20, 6), np.float32)
            scores = np.expand_dims(scores, axis=1)
            # add scores to the last column
            dets = np.concatenate((output_data, scores), axis=1)
            # filter out lines with scores below threshold
            dets = dets[dets[:, -1] > 0.5, :]
            # limit to top 20 scores, descending order
            ordered = dets[dets[:, -1].argsort()[::-1]][:20]
            detections = np.zeros((20, 6), np.float32)

            for i, object_detected in enumerate(ordered):
                detections[i] = self.process_yolo(
                    np.argmax(object_detected[4:-1]),
                    object_detected[-1],
                    object_detected[:4],
                )
            return detections
        elif self.ov_model_type == ModelTypeEnum.yolov5:
            out_tensor = infer_request.get_output_tensor()
            output_data = out_tensor.data[0]
            # filter out lines with scores below threshold
            conf_mask = (output_data[:, 4] >= 0.5).squeeze()
            output_data = output_data[conf_mask]
            # limit to top 20 scores, descending order
            ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]

            detections = np.zeros((20, 6), np.float32)

            for i, object_detected in enumerate(ordered):
                detections[i] = self.process_yolo(
                    np.argmax(object_detected[5:]),
                    object_detected[4],
                    object_detected[:4],
                )
            return detections

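To make the YOLOX decode above concrete, a small self-contained example (values are illustrative, assuming a 416x416 input): `(xy + grid) * stride` maps cell-relative offsets back to input pixels, and `exp(wh) * stride` does the same for box sizes.

```python
import numpy as np

h = w = 416
strides = [8, 16, 32]
grids, expanded_strides = [], []
for stride in strides:
    hsize, wsize = h // stride, w // stride
    xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
    grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
    grids.append(grid)
    expanded_strides.append(np.full((1, grid.shape[1], 1), stride))
grids = np.concatenate(grids, 1)
expanded_strides = np.concatenate(expanded_strides, 1)

# A raw (x, y) prediction of (0.4, -0.2) in the stride-8 cell at (x=3, y=5):
cell = 5 * (w // 8) + 3  # row-major index into the flattened grid
xy = (np.array([0.4, -0.2]) + grids[0, cell]) * expanded_strides[0, cell]
print(xy)  # [27.2 38.4] -- the box center in input pixels
```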
@@ -67,7 +67,7 @@ class EventProcessor(threading.Thread):

        while not self.stop_event.is_set():
            try:
                event_type, camera, event_data = self.event_queue.get(timeout=10)
                event_type, camera, event_data = self.event_queue.get(timeout=1)
            except queue.Empty:
                continue

@@ -1,14 +1,52 @@
"""Handles inserting and maintaining ffmpeg presets."""

import logging
import os

from typing import Any

from frigate.version import VERSION
from frigate.const import BTBN_PATH
from frigate.util import vainfo_hwaccel


logger = logging.getLogger(__name__)


class LibvaGpuSelector:
    "Automatically selects the correct libva GPU."

    _selected_gpu = None

    def get_selected_gpu(self) -> str:
        """Get selected libva GPU."""
        if not os.path.exists("/dev/dri"):
            return ""

        if self._selected_gpu:
            return self._selected_gpu

        devices = list(filter(lambda d: d.startswith("render"), os.listdir("/dev/dri")))

        if len(devices) < 2:
            self._selected_gpu = "/dev/dri/renderD128"
            return self._selected_gpu

        for device in devices:
            check = vainfo_hwaccel(device_name=device)

            logger.debug(f"{device} return vainfo status code: {check.returncode}")

            if check.returncode == 0:
                self._selected_gpu = f"/dev/dri/{device}"
                return self._selected_gpu

        return ""


TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"

_gpu_selector = LibvaGpuSelector()
_user_agent_args = [
    "-user_agent",
    f"FFmpeg Frigate/{VERSION}",
@@ -23,7 +61,7 @@ PRESETS_HW_ACCEL_DECODE = {
        "-hwaccel",
        "vaapi",
        "-hwaccel_device",
        "/dev/dri/renderD128",
        _gpu_selector.get_selected_gpu(),
        "-hwaccel_output_format",
        "vaapi",
    ],
@@ -31,7 +69,7 @@ PRESETS_HW_ACCEL_DECODE = {
        "-hwaccel",
        "qsv",
        "-qsv_device",
        "/dev/dri/renderD128",
        _gpu_selector.get_selected_gpu(),
        "-hwaccel_output_format",
        "qsv",
        "-c:v",
@@ -43,7 +81,7 @@ PRESETS_HW_ACCEL_DECODE = {
        "-hwaccel",
        "qsv",
        "-qsv_device",
        "/dev/dri/renderD128",
        _gpu_selector.get_selected_gpu(),
        "-hwaccel_output_format",
        "qsv",
        "-c:v",
@@ -54,47 +92,36 @@ PRESETS_HW_ACCEL_DECODE = {
        "cuda",
        "-hwaccel_output_format",
        "cuda",
        "-extra_hw_frames",
        "2",
        "-c:v",
        "h264_cuvid",
    ],
    "preset-nvidia-h265": [
        "-hwaccel",
        "cuda",
        "-hwaccel_output_format",
        "cuda",
        "-extra_hw_frames",
        "2",
        "-c:v",
        "hevc_cuvid",
    ],
    "preset-nvidia-mjpeg": [
        "-hwaccel",
        "cuda",
        "-hwaccel_output_format",
        "cuda",
        "-extra_hw_frames",
        "2",
        "-c:v",
        "mjpeg_cuvid",
    ],
}

PRESETS_HW_ACCEL_SCALE = {
    "preset-rpi-32-h264": "-r {0} -s {1}x{2} -f rawvideo -pix_fmt yuv420p",
    "preset-rpi-64-h264": "-r {0} -s {1}x{2} -f rawvideo -pix_fmt yuv420p",
    "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p -f rawvideo",
    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
    "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
    "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
    "preset-rpi-32-h264": "-r {0} -s {1}x{2}",
    "preset-rpi-64-h264": "-r {0} -s {1}x{2}",
    "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "default": "-r {0} -s {1}x{2}",
}

PRESETS_HW_ACCEL_ENCODE = {
    "preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
    "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
    "preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
    "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
    "preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
    "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
    "preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
@@ -127,7 +154,9 @@ def parse_preset_hardware_acceleration_scale(
    scale = PRESETS_HW_ACCEL_SCALE.get(arg, "")

    if scale:
        return scale.format(fps, width, height).split(" ")
        scale = scale.format(fps, width, height).split(" ")
        scale.extend(detect_args)
        return scale
    else:
        scale = scale.format(fps, width, height).split(" ")
        scale.extend(detect_args)
@@ -142,6 +171,7 @@ def parse_preset_hardware_acceleration_encode(arg: Any, input: str, output: str)
    return PRESETS_HW_ACCEL_ENCODE.get(arg, PRESETS_HW_ACCEL_ENCODE["default"]).format(
        input,
        output,
        _gpu_selector.get_selected_gpu(),
    )


@@ -231,6 +261,13 @@ PRESETS_INPUT = {
        "1",
    ],
    "preset-rtsp-restream": _user_agent_args
    + [
        "-rtsp_transport",
        "tcp",
        TIMEOUT_PARAM,
        "5000000",
    ],
    "preset-rtsp-restream-low-latency": _user_agent_args
    + [
        "-rtsp_transport",
        "tcp",

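With the new defaults, the detect output args (now carrying `-threads 2`) are appended after the scale preset is formatted. An illustrative call, assuming the signature `parse_preset_hardware_acceleration_scale(arg, detect_args, fps, width, height)`:

```python
args = parse_preset_hardware_acceleration_scale(
    "preset-vaapi",
    ["-threads", "2", "-f", "rawvideo", "-pix_fmt", "yuv420p"],
    5, 1280, 720,
)
# ['-r', '5', '-vf', 'fps=5,scale_vaapi=w=1280:h=720,hwdownload,format=yuv420p',
#  '-threads', '2', '-f', 'rawvideo', '-pix_fmt', 'yuv420p']
```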
@@ -111,6 +111,7 @@ def events_summary():
        Event.select(
            Event.camera,
            Event.label,
            Event.sub_label,
            fn.strftime(
                "%Y-%m-%d",
                fn.datetime(
@@ -124,6 +125,7 @@ def events_summary():
        .group_by(
            Event.camera,
            Event.label,
            Event.sub_label,
            fn.strftime(
                "%Y-%m-%d",
                fn.datetime(
@@ -184,6 +186,18 @@ def send_to_plus(id):
        logger.error(message)
        return make_response(jsonify({"success": False, "message": message}), 404)

    if event.end_time is None:
        logger.error(f"Unable to load clean png for in-progress event: {event.id}")
        return make_response(
            jsonify(
                {
                    "success": False,
                    "message": "Unable to load clean png for in-progress event",
                }
            ),
            400,
        )

    if event.plus_id:
        message = "Already submitted to plus"
        logger.error(message)
@@ -202,6 +216,15 @@ def send_to_plus(id):
            400,
        )

    if image is None or image.size == 0:
        logger.error(f"Unable to load clean png for event: {event.id}")
        return make_response(
            jsonify(
                {"success": False, "message": "Unable to load clean png for event"}
            ),
            400,
        )

    try:
        plus_id = current_app.plus_api.upload_image(image, event.camera)
    except Exception as ex:
@@ -301,7 +324,9 @@ def get_sub_labels():
        sub_labels.remove(None)

    if split_joined:
        for label in sub_labels:
        original_labels = sub_labels.copy()

        for label in original_labels:
            if "," in label:
                sub_labels.remove(label)
                parts = label.split(",")
@@ -310,6 +335,7 @@ def get_sub_labels():
                if not (part.strip()) in sub_labels:
                    sub_labels.append(part.strip())

    sub_labels.sort()
    return jsonify(sub_labels)


@@ -617,7 +643,13 @@ def events():
        sub_label_clauses.append((Event.sub_label.is_null()))

    for label in filtered_sub_labels:
        sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label}*"))
        sub_label_clauses.append(
            (Event.sub_label.cast("text") == label)
        )  # include exact matches

        # include this label when part of a list
        sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
        sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))

    sub_label_clause = reduce(operator.or_, sub_label_clauses)
    clauses.append((sub_label_clause))
@@ -1275,12 +1307,12 @@ def ffprobe():
        output.append(
            {
                "return_code": ffprobe.returncode,
                "stderr": json.loads(ffprobe.stderr.decode("unicode_escape").strip())
                if ffprobe.stderr.decode()
                else {},
                "stderr": ffprobe.stderr.decode("unicode_escape").strip()
                if ffprobe.returncode != 0
                else "",
                "stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip())
                if ffprobe.stdout.decode()
                else {},
                if ffprobe.returncode == 0
                else "",
            }
        )

@@ -2,11 +2,16 @@
import logging
import threading
import os
import signal
import queue
import multiprocessing as mp
from multiprocessing.queues import Queue
from logging import handlers
from typing import Optional
from types import FrameType
from setproctitle import setproctitle
from typing import Deque
from typing import Deque, Optional
from types import FrameType
from collections import deque

from frigate.util import clean_camera_user_pass
@@ -34,10 +39,21 @@ def log_process(log_queue: Queue) -> None:
    threading.current_thread().name = f"logger"
    setproctitle("frigate.logger")
    listener_configurer()

    stop_event = mp.Event()

    def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    while True:
        try:
            record = log_queue.get(timeout=5)
            record = log_queue.get(timeout=1)
        except (queue.Empty, KeyboardInterrupt):
            if stop_event.is_set():
                break
            continue
        logger = logging.getLogger(record.name)
        logger.handle(record)

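The shutdown pattern introduced here, in miniature: a signal handler flips an event, and the consumer polls its queue with a short timeout so it can notice the flag promptly. A sketch under those assumptions, not the actual Frigate code:

```python
import multiprocessing as mp
import queue
import signal

stop_event = mp.Event()
signal.signal(signal.SIGTERM, lambda signum, frame: stop_event.set())

def drain(q):
    while True:
        try:
            item = q.get(timeout=1)  # short timeout keeps the loop responsive
        except queue.Empty:
            if stop_event.is_set():
                break
            continue
        print(item)
```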
@@ -88,6 +88,7 @@ def run_detector(
    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        logger.info("Signal to exit detection process...")
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
@@ -104,7 +105,7 @@ def run_detector(

    while not stop_event.is_set():
        try:
            connection_id = detection_queue.get(timeout=5)
            connection_id = detection_queue.get(timeout=1)
        except queue.Empty:
            continue
        input_frame = frame_manager.get(
@@ -125,6 +126,8 @@ def run_detector(

        avg_speed.value = (avg_speed.value * 9 + duration) / 10

    logger.info("Exited detection process...")


class ObjectDetectProcess:
    def __init__(
@@ -144,6 +147,9 @@ class ObjectDetectProcess:
        self.start_or_restart()

    def stop(self):
        # if the process has already exited on its own, just return
        if self.detect_process and self.detect_process.exitcode:
            return
        self.detect_process.terminate()
        logging.info("Waiting for detection process to exit gracefully...")
        self.detect_process.join(timeout=30)
@@ -151,6 +157,7 @@ class ObjectDetectProcess:
            logging.info("Detection process didnt exit. Force killing...")
            self.detect_process.kill()
            self.detect_process.join()
        logging.info("Detection process has exited...")

    def start_or_restart(self):
        self.detection_start.value = 0.0
@@ -173,12 +180,13 @@


class RemoteObjectDetector:
    def __init__(self, name, labels, detection_queue, event, model_config):
    def __init__(self, name, labels, detection_queue, event, model_config, stop_event):
        self.labels = labels
        self.name = name
        self.fps = EventsPerSecond()
        self.detection_queue = detection_queue
        self.event = event
        self.stop_event = stop_event
        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
        self.np_shm = np.ndarray(
            (1, model_config.height, model_config.width, 3),
@@ -193,11 +201,14 @@ class RemoteObjectDetector:
    def detect(self, tensor_input, threshold=0.4):
        detections = []

        if self.stop_event.is_set():
            return detections

        # copy input to shared memory
        self.np_shm[:] = tensor_input[:]
        self.event.clear()
        self.detection_queue.put(self.name)
        result = self.event.wait(timeout=10.0)
        result = self.event.wait(timeout=5.0)

        # if it timed out
        if result is None:

@@ -901,7 +901,7 @@ class TrackedObjectProcessor(threading.Thread):
                    current_tracked_objects,
                    motion_boxes,
                    regions,
                ) = self.tracked_objects_queue.get(True, 10)
                ) = self.tracked_objects_queue.get(True, 1)
            except queue.Empty:
                continue

@@ -38,16 +38,10 @@ class FFMpegConverter:
        quality: int,
        birdseye_rtsp: bool = False,
    ):
        if birdseye_rtsp:
            if os.path.exists(BIRDSEYE_PIPE):
                os.remove(BIRDSEYE_PIPE)
        self.bd_pipe = None

            os.mkfifo(BIRDSEYE_PIPE, mode=0o777)
            stdin = os.open(BIRDSEYE_PIPE, os.O_RDONLY | os.O_NONBLOCK)
            self.bd_pipe = os.open(BIRDSEYE_PIPE, os.O_WRONLY)
            os.close(stdin)
        else:
            self.bd_pipe = None
        if birdseye_rtsp:
            self.recreate_birdseye_pipe()

        ffmpeg_cmd = [
            "ffmpeg",
@@ -80,14 +74,36 @@ class FFMpegConverter:
            start_new_session=True,
        )

    def recreate_birdseye_pipe(self) -> None:
        if self.bd_pipe:
            os.close(self.bd_pipe)

        if os.path.exists(BIRDSEYE_PIPE):
            os.remove(BIRDSEYE_PIPE)

        os.mkfifo(BIRDSEYE_PIPE, mode=0o777)
        stdin = os.open(BIRDSEYE_PIPE, os.O_RDONLY | os.O_NONBLOCK)
        self.bd_pipe = os.open(BIRDSEYE_PIPE, os.O_WRONLY)
        os.close(stdin)
        self.reading_birdseye = False

    def write(self, b) -> None:
        self.process.stdin.write(b)

        if self.bd_pipe:
            try:
                os.write(self.bd_pipe, b)
                self.reading_birdseye = True
            except BrokenPipeError:
                # catch error when no one is listening
                if self.reading_birdseye:
                    # we know the pipe was being read from and now it is not
                    # so we should recreate the pipe to ensure no partially-read
                    # frames exist
                    logger.debug(
                        "Recreating the birdseye pipe because it was read from and now is not"
                    )
                    self.recreate_birdseye_pipe()

            return

    def read(self, length):
@@ -109,14 +125,15 @@ class FFMpegConverter:


class BroadcastThread(threading.Thread):
    def __init__(self, camera, converter, websocket_server):
    def __init__(self, camera, converter, websocket_server, stop_event):
        super(BroadcastThread, self).__init__()
        self.camera = camera
        self.converter = converter
        self.websocket_server = websocket_server
        self.stop_event = stop_event

    def run(self):
        while True:
        while not self.stop_event.is_set():
            buf = self.converter.read(65536)
            if buf:
                manager = self.websocket_server.manager
@@ -426,7 +443,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
            cam_config.live.quality,
        )
        broadcasters[camera] = BroadcastThread(
            camera, converters[camera], websocket_server
            camera, converters[camera], websocket_server, stop_event
        )

    if config.birdseye.enabled:
@@ -439,7 +456,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
            config.birdseye.restream,
        )
        broadcasters["birdseye"] = BroadcastThread(
            "birdseye", converters["birdseye"], websocket_server
            "birdseye", converters["birdseye"], websocket_server, stop_event
        )

    websocket_thread.start()
@@ -463,7 +480,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
                current_tracked_objects,
                motion_boxes,
                regions,
            ) = video_output_queue.get(True, 10)
            ) = video_output_queue.get(True, 1)
        except queue.Empty:
            continue

@@ -171,10 +171,12 @@ class RecordingMaintainer(threading.Thread):
                else:
                    if duration == -1:
                        logger.warning(
                            f"Failed to probe corrupt segment {f}: {p.returncode} - {p.stderr}"
                            f"Failed to probe corrupt segment {cache_path}: {p.returncode} - {p.stderr}"
                        )

                    logger.warning(f"Discarding a corrupt recording segment: {f}")
                    logger.warning(
                        f"Discarding a corrupt recording segment: {cache_path}"
                    )
                    Path(cache_path).unlink(missing_ok=True)
                    continue

@@ -1,33 +0,0 @@
"""Controls go2rtc restream."""


import logging
import requests

from frigate.config import FrigateConfig
from frigate.const import BIRDSEYE_PIPE
from frigate.ffmpeg_presets import (
    parse_preset_hardware_acceleration_encode,
)

logger = logging.getLogger(__name__)


class RestreamApi:
    """Control go2rtc relay API."""

    def __init__(self, config: FrigateConfig) -> None:
        self.config: FrigateConfig = config

    def add_cameras(self) -> None:
        """Add cameras to go2rtc."""
        self.relays: dict[str, str] = {}

        if self.config.birdseye.restream:
            self.relays[
                "birdseye"
            ] = f"exec:{parse_preset_hardware_acceleration_encode(self.config.ffmpeg.hwaccel_args, f'-f rawvideo -pix_fmt yuv420p -video_size {self.config.birdseye.width}x{self.config.birdseye.height} -r 10 -i {BIRDSEYE_PIPE}', '-rtsp_transport tcp -f rtsp {output}')}"

        for name, path in self.relays.items():
            params = {"src": path, "name": name}
            requests.put("http://127.0.0.1:1984/api/streams", params=params)
@@ -283,8 +283,10 @@ class StatsEmitter(threading.Thread):
    def run(self) -> None:
        time.sleep(10)
        while not self.stop_event.wait(self.config.mqtt.stats_interval):
            logger.debug("Starting stats collection")
            stats = stats_snapshot(
                self.config, self.stats_tracking, self.hwaccel_errors
            )
            self.dispatcher.publish("stats", json.dumps(stats), retain=False)
        logger.info(f"Exiting watchdog...")
            logger.debug("Finished stats collection")
        logger.info(f"Exiting stats emitter...")

@@ -36,3 +36,14 @@ class TestUserPassCleanup(unittest.TestCase):
        """Test that no change is made to path with no special characters."""
        escaped = escape_special_characters(self.rtsp_with_pass)
        assert escaped == self.rtsp_with_pass


class TestUserPassMasking(unittest.TestCase):
    def setUp(self) -> None:
        self.rtsp_log_message = "Did you mean file:rtsp://user:password@192.168.1.3:554"

    def test_rtsp_in_log_message(self):
        """Test that the rtsp url in a log message is espaced."""
        escaped = clean_camera_user_pass(self.rtsp_log_message)
        print(f"The escaped is {escaped}")
        assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"

@@ -14,7 +14,7 @@ from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory
from typing import Any, AnyStr, Tuple
from typing import Any, AnyStr, Optional, Tuple

import cv2
import numpy as np
@@ -722,7 +722,7 @@ def load_labels(path, encoding="utf-8"):

def clean_camera_user_pass(line: str) -> str:
    """Removes user and password from line."""
    if line.startswith("rtsp://"):
    if "rtsp://" in line:
        return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
    else:
        return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
@@ -926,6 +926,17 @@ def get_nvidia_gpu_stats() -> dict[str, str]:
        "--format=csv",
    ]

    if (
        "CUDA_VISIBLE_DEVICES" in os.environ
        and os.environ["CUDA_VISIBLE_DEVICES"].isdigit()
    ):
        nvidia_smi_command.extend(["--id", os.environ["CUDA_VISIBLE_DEVICES"]])
    elif (
        "NVIDIA_VISIBLE_DEVICES" in os.environ
        and os.environ["NVIDIA_VISIBLE_DEVICES"].isdigit()
    ):
        nvidia_smi_command.extend(["--id", os.environ["NVIDIA_VISIBLE_DEVICES"]])

    p = sp.run(
        nvidia_smi_command,
        encoding="ascii",
@@ -965,9 +976,13 @@ def ffprobe_stream(path: str) -> sp.CompletedProcess:
    return sp.run(ffprobe_cmd, capture_output=True)


def vainfo_hwaccel() -> sp.CompletedProcess:
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
    """Run vainfo."""
    ffprobe_cmd = ["vainfo"]
    ffprobe_cmd = (
        ["vainfo"]
        if not device_name
        else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
    )
    return sp.run(ffprobe_cmd, capture_output=True)

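Usage sketch for the extended helper (device names are examples): LibvaGpuSelector calls it once per render node and picks the first one where vainfo exits cleanly.

```python
for device in ("renderD128", "renderD129"):
    check = vainfo_hwaccel(device_name=device)
    # returncode 0 means vainfo could initialize VA-API on this node
    print(device, check.returncode)
```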
@@ -160,6 +160,7 @@ def capture_frames(
    fps: mp.Value,
    skipped_fps: mp.Value,
    current_frame: mp.Value,
    stop_event: mp.Event,
):

    frame_size = frame_shape[0] * frame_shape[1]
@@ -177,6 +178,9 @@
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception as e:
            # shutdown has been initiated
            if stop_event.is_set():
                break
            logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")

            if ffmpeg_process.poll() != None:
@@ -340,6 +344,7 @@ class CameraWatchdog(threading.Thread):
            self.frame_shape,
            self.frame_queue,
            self.camera_fps,
            self.stop_event,
        )
        self.capture_thread.start()

@@ -368,13 +373,16 @@ class CameraWatchdog(threading.Thread):


class CameraCapture(threading.Thread):
    def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
    def __init__(
        self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
    ):
        threading.Thread.__init__(self)
        self.name = f"capture:{camera_name}"
        self.camera_name = camera_name
        self.frame_shape = frame_shape
        self.frame_queue = frame_queue
        self.fps = fps
        self.stop_event = stop_event
        self.skipped_fps = EventsPerSecond()
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
@@ -392,6 +400,7 @@ class CameraCapture(threading.Thread):
            self.fps,
            self.skipped_fps,
            self.current_frame,
            self.stop_event,
        )


@@ -461,7 +470,7 @@ def track_camera(
        motion_contour_area,
    )
    object_detector = RemoteObjectDetector(
        name, labelmap, detection_queue, result_connection, model_config
        name, labelmap, detection_queue, result_connection, model_config, stop_event
    )

    object_tracker = ObjectTracker(config.detect)
@@ -601,7 +610,7 @@ def process_frames(
            break

        try:
            frame_time = frame_queue.get(True, 10)
            frame_time = frame_queue.get(True, 1)
        except queue.Empty:
            continue

@@ -787,6 +796,7 @@ def process_frames(
                refining = True
            else:
                selected_objects.append(obj)

        # set the detections list to only include top, complete objects
        # and new detections
        detections = selected_objects

41 web/package-lock.json (generated)
@@ -10,6 +10,7 @@
  "dependencies": {
    "@cycjimmy/jsmpeg-player": "^6.0.5",
    "axios": "^1.2.2",
    "copy-to-clipboard": "3.3.3",
    "date-fns": "^2.29.3",
    "idb-keyval": "^6.2.0",
    "immer": "^9.0.16",
@@ -19,6 +20,7 @@
    "preact-router": "^4.1.0",
    "react": "npm:@preact/compat@^17.1.2",
    "react-dom": "npm:@preact/compat@^17.1.2",
    "strftime": "^0.10.1",
    "swr": "^1.3.0",
    "video.js": "^7.20.3",
    "videojs-playlist": "^5.0.0",
@@ -3369,6 +3371,14 @@
        "node": ">= 0.6"
      }
    },
    "node_modules/copy-to-clipboard": {
      "version": "3.3.3",
      "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz",
      "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==",
      "dependencies": {
        "toggle-selection": "^1.0.6"
      }
    },
    "node_modules/core-js": {
      "version": "3.26.0",
      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.26.0.tgz",
@@ -8547,6 +8557,14 @@
        "node": ">=8"
      }
    },
    "node_modules/strftime": {
      "version": "0.10.1",
      "resolved": "https://registry.npmjs.org/strftime/-/strftime-0.10.1.tgz",
      "integrity": "sha512-nVvH6JG8KlXFPC0f8lojLgEsPA18lRpLZ+RrJh/NkQV2tqOgZfbas8gcU8SFgnnqR3rWzZPYu6N2A3xzs/8rQg==",
      "engines": {
        "node": ">=0.2.0"
      }
    },
    "node_modules/strict-event-emitter": {
      "version": "0.2.8",
      "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz",
@@ -8901,6 +8919,11 @@
        "node": ">=8.0"
      }
    },
    "node_modules/toggle-selection": {
      "version": "1.0.6",
      "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz",
      "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ=="
    },
    "node_modules/totalist": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.0.tgz",
@@ -12151,6 +12174,14 @@
      "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==",
      "dev": true
    },
    "copy-to-clipboard": {
      "version": "3.3.3",
      "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz",
      "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==",
      "requires": {
        "toggle-selection": "^1.0.6"
      }
    },
    "core-js": {
      "version": "3.26.0",
      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.26.0.tgz",
@@ -15868,6 +15899,11 @@
        }
      }
    },
    "strftime": {
      "version": "0.10.1",
      "resolved": "https://registry.npmjs.org/strftime/-/strftime-0.10.1.tgz",
      "integrity": "sha512-nVvH6JG8KlXFPC0f8lojLgEsPA18lRpLZ+RrJh/NkQV2tqOgZfbas8gcU8SFgnnqR3rWzZPYu6N2A3xzs/8rQg=="
    },
    "strict-event-emitter": {
      "version": "0.2.8",
      "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz",
@@ -16154,6 +16190,11 @@
        "is-number": "^7.0.0"
      }
    },
    "toggle-selection": {
      "version": "1.0.6",
      "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz",
      "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ=="
    },
    "totalist": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.0.tgz",

@@ -18,17 +18,18 @@
    "date-fns": "^2.29.3",
    "idb-keyval": "^6.2.0",
    "immer": "^9.0.16",
    "monaco-yaml": "^4.0.2",
    "preact": "^10.11.3",
    "preact-async-route": "^2.2.1",
    "preact-router": "^4.1.0",
    "react": "npm:@preact/compat@^17.1.2",
    "react-dom": "npm:@preact/compat@^17.1.2",
    "vite-plugin-monaco-editor": "^1.1.0",
    "monaco-yaml": "^4.0.2",
    "strftime": "^0.10.1",
    "swr": "^1.3.0",
    "video.js": "^7.20.3",
    "videojs-playlist": "^5.0.0",
    "videojs-seek-buttons": "^3.0.1"
    "videojs-seek-buttons": "^3.0.1",
    "vite-plugin-monaco-editor": "^1.1.0"
  },
  "devDependencies": {
    "@preact/preset-vite": "^2.5.0",

@@ -11,7 +11,15 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
  const [hasLoaded, setHasLoaded] = useState(false);
  const containerRef = useRef(null);
  const canvasRef = useRef(null);
  const [{ width: availableWidth }] = useResizeObserver(containerRef);
  const [{ width: containerWidth }] = useResizeObserver(containerRef);

  // Add scrollbar width (when visible) to the available observer width to eliminate screen juddering.
  // https://github.com/blakeblackshear/frigate/issues/1657
  let scrollBarWidth = 0;
  if (window.innerWidth && document.body.offsetWidth) {
    scrollBarWidth = window.innerWidth - document.body.offsetWidth;
  }
  const availableWidth = scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth;

  const { name } = config ? config.cameras[camera] : '';
  const enabled = config ? config.cameras[camera].enabled : 'True';
@@ -22,7 +30,11 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
    const scaledHeight = Math.floor(availableWidth / aspectRatio);
    return stretch ? scaledHeight : Math.min(scaledHeight, height);
  }, [availableWidth, aspectRatio, height, stretch]);
  const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio), [scaledHeight, aspectRatio]);
  const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
    scaledHeight,
    aspectRatio,
    scrollBarWidth,
  ]);

  const img = useMemo(() => new Image(), []);
  img.onload = useCallback(

@@ -21,7 +21,7 @@ export default function Dialog({ children, portalRootID = 'dialogs' }) {
    >
      <div
        role="modal"
        className={`absolute rounded shadow-2xl bg-white dark:bg-gray-700 max-w-sm text-gray-900 dark:text-white transition-transform transition-opacity duration-75 transform scale-90 opacity-0 ${
        className={`absolute rounded shadow-2xl bg-white dark:bg-gray-700 sm:max-w-sm md:max-w-md lg:max-w-lg text-gray-900 dark:text-white transition-transform transition-opacity duration-75 transform scale-90 opacity-0 ${
          show ? 'scale-100 opacity-100' : ''
        }`}
      >

@@ -13,9 +13,11 @@ export default function MultiSelect({ className, title, options, selection, onTo
  const [state, setState] = useState({
    showMenu: false,
  });

  const isOptionSelected = (item) => { return selection == "all" || selection.split(',').indexOf(item) > -1; }

  const menuHeight = Math.round(window.innerHeight * 0.55);

  return (
    <div className={`${className} p-2`} ref={popupRef}>
      <div
@@ -26,7 +28,7 @@ export default function MultiSelect({ className, title, options, selection, onTo
        <ArrowDropdown className="w-6" />
      </div>
      {state.showMenu ? (
        <Menu relativeTo={popupRef} onDismiss={() => setState({ showMenu: false })}>
        <Menu className={`max-h-[${menuHeight}px] overflow-scroll`} relativeTo={popupRef} onDismiss={() => setState({ showMenu: false })}>
          <div className="flex flex-wrap justify-between items-center">
            <Heading className="p-4 justify-center" size="md">{title}</Heading>
            <Button tabindex="false" className="mx-4" onClick={() => onShowAll() }>

@@ -80,7 +80,9 @@ export default function RelativeModal({

      // too close to bottom
      if (top + menuHeight > windowHeight - WINDOW_PADDING + window.scrollY) {
        newTop = WINDOW_PADDING;
        // If the pop-up modal would extend beyond the bottom of the visible window,
        // reposition the modal to appear above the clicked icon instead
        newTop = top - menuHeight;
      }

      if (top <= WINDOW_PADDING + window.scrollY) {

@@ -10,7 +10,10 @@ import useSWR from 'swr';
export default function Birdseye() {
  const { data: config } = useSWR('config');

  const [viewSource, setViewSource, sourceIsLoaded] = usePersistence('birdseye-source', 'mse');
  const [viewSource, setViewSource, sourceIsLoaded] = usePersistence(
    'birdseye-source',
    getDefaultLiveMode(config)
  );
  const sourceValues = ['mse', 'webrtc', 'jsmpeg'];

  if (!config || !sourceIsLoaded) {
@@ -80,3 +83,16 @@ export default function Birdseye() {
    </div>
  );
}


function getDefaultLiveMode(config) {
  if (config) {
    if (config.birdseye.restream) {
      return config.ui.live_mode;
    }

    return 'jsmpeg';
  }

  return undefined;
}

@@ -32,7 +32,7 @@ export default function Camera({ camera }) {
    : 0;
  const [viewSource, setViewSource, sourceIsLoaded] = usePersistence(
    `${camera}-source`,
    getDefaultLiveMode(config, cameraConfig)
    getDefaultLiveMode(config, cameraConfig, restreamEnabled)
  );
  const sourceValues = restreamEnabled ? ['mse', 'webrtc', 'jsmpeg'] : ['jsmpeg'];
  const [options, setOptions] = usePersistence(`${camera}-feed`, emptyObject);

@@ -66,6 +66,7 @@ export default function Events({ path, ...props }) {
    has_clip: false,
    has_snapshot: false,
    plus_id: undefined,
    end_time: null,
  });
  const [deleteFavoriteState, setDeleteFavoriteState] = useState({
    deletingFavoriteEventId: null,
@@ -190,6 +191,7 @@ export default function Events({ path, ...props }) {
      has_clip: event.has_clip,
      has_snapshot: event.has_snapshot,
      plus_id: event.plus_id,
      end_time: event.end_time,
    }));
    downloadButton.current = e.target;
    setState({ ...state, showDownloadMenu: true });
@@ -287,9 +289,6 @@ export default function Events({ path, ...props }) {
    return <ActivityIndicator />;
  }

  const timezone = config.ui?.timezone || Intl.DateTimeFormat().resolvedOptions().timeZone;
  const locale = window.navigator?.language || 'en-US';

  return (
    <div className="space-y-4 p-2 px-4 w-full">
      <Heading>Events</Heading>
@@ -366,7 +365,7 @@ export default function Events({ path, ...props }) {
            download
          />
        )}
        {downloadEvent.has_snapshot && !downloadEvent.plus_id && (
        {(downloadEvent.end_time && downloadEvent.has_snapshot && !downloadEvent.plus_id) && (
          <MenuItem
            icon={UploadPlus}
            label={uploading.includes(downloadEvent.id) ? 'Uploading...' : 'Send to Frigate+'}
@@ -508,7 +507,7 @@ export default function Events({ path, ...props }) {
              </div>
              <div className="text-sm flex">
                <Clock className="h-5 w-5 mr-2 inline" />
                {formatUnixTimestampToDateTime(event.start_time, locale, timezone)}
                {formatUnixTimestampToDateTime(event.start_time, { ...config.ui })}
                <div className="hidden md:inline">
                  <span className="m-1">-</span>
                  <TimeAgo time={event.start_time * 1000} dense />
@@ -527,7 +526,7 @@ export default function Events({ path, ...props }) {
                </div>
              </div>
              <div class="hidden sm:flex flex-col justify-end mr-2">
                {event.has_snapshot && (
                {(event.end_time && event.has_snapshot) && (
                  <Fragment>
                    {event.plus_id ? (
                      <div className="uppercase text-xs">Sent to Frigate+</div>

@@ -4,6 +4,7 @@ import { useCallback, useEffect, useState } from 'preact/hooks';
import ButtonsTabbed from '../components/ButtonsTabbed';
import useSWR from 'swr';
import Button from '../components/Button';
import copy from 'copy-to-clipboard';

export default function Logs() {
  const [logService, setLogService] = useState('frigate');
@@ -14,10 +15,7 @@ export default function Logs() {
  const { data: nginxLogs } = useSWR('logs/nginx');

  const handleCopyLogs = useCallback(() => {
    async function copy() {
      await window.navigator.clipboard.writeText(logs);
    }
    copy();
    copy(logs);
  }, [logs]);

  useEffect(() => {

@@ -133,7 +133,7 @@ export default function Recording({ camera, date, hour = '00', minute = '00', se
  return (
    <div className="space-y-4 p-2 px-4">
      <Heading>{camera.replaceAll('_', ' ')} Recordings</Heading>
      <div className="text-xs">Dates and times are based on the browser's timezone {timezone}</div>
      <div className="text-xs">Dates and times are based on the timezone {timezone}</div>

      <VideoPlayer
        options={{

@@ -49,14 +49,14 @@ export default function System() {
    });

    if (response.status === 200) {
      setState({ ...state, showFfprobe: true, ffprobe: JSON.stringify(response.data, null, 2) });
      setState({ ...state, showFfprobe: true, ffprobe: response.data });
    } else {
      setState({ ...state, showFfprobe: true, ffprobe: 'There was an error getting the ffprobe output.' });
    }
  };

  const onCopyFfprobe = async () => {
    copy(JSON.stringify(state.ffprobe, null, 2));
    copy(JSON.stringify(state.ffprobe).replace(/[\\\s]+/gi, ''));
    setState({ ...state, ffprobe: '', showFfprobe: false });
  };

@@ -68,34 +68,95 @@ export default function System() {
    const response = await axios.get('vainfo');

    if (response.status === 200) {
      setState({ ...state, showVainfo: true, vainfo: JSON.stringify(response.data, null, 2) });
      setState({
        ...state,
        showVainfo: true,
        vainfo: response.data,
      });
    } else {
      setState({ ...state, showVainfo: true, vainfo: 'There was an error getting the vainfo output.' });
    }
  };

  const onCopyVainfo = async () => {
    copy(JSON.stringify(state.vainfo, null, 2));
    copy(JSON.stringify(state.vainfo).replace(/[\\\s]+/gi, ''));
    setState({ ...state, vainfo: '', showVainfo: false });
  };

  return (
    <div className="space-y-4 p-2 px-4">
      <Heading>
        System <span className="text-sm">{service.version}</span>
      </Heading>
      <div className="flex justify-between">
        <Heading>
          System <span className="text-sm">{service.version}</span>
        </Heading>
        {config && (
          <Link
            className="p-1 text-blue-500 hover:underline"
            target="_blank"
            rel="noopener noreferrer"
            href="/live/webrtc/"
          >
            go2rtc dashboard
          </Link>
        )}
      </div>

      {service.last_updated && (
        <p>
          <span>Last refreshed: <TimeAgo time={service.last_updated * 1000} dense /></span>
          <span>
            Last refreshed: <TimeAgo time={service.last_updated * 1000} dense />
          </span>
        </p>
      )}

      {state.showFfprobe && (
        <Dialog>
          <div className="p-4">
          <div className="p-4 mb-2 max-h-96 whitespace-pre-line overflow-scroll">
            <Heading size="lg">Ffprobe Output</Heading>
            {state.ffprobe != '' ? <p className="mb-2">{state.ffprobe}</p> : <ActivityIndicator />}
            {state.ffprobe != '' ? (
              <div>
                {state.ffprobe.map((stream, idx) => (
                  <div key={idx} className="mb-2 max-h-96 whitespace-pre-line">
                    <div>Stream {idx}:</div>
                    <div className="px-2">Return Code: {stream.return_code}</div>
                    <br />
                    {stream.return_code == 0 ? (
                      <div>
                        {stream.stdout.streams.map((codec, idx) => (
                          <div className="px-2" key={idx}>
                            {codec.width ? (
                              <div>
                                <div>Video:</div>
                                <br />
                                <div>Codec: {codec.codec_long_name}</div>
                                <div>
                                  Resolution: {codec.width}x{codec.height}
                                </div>
                                <div>FPS: {codec.avg_frame_rate == '0/0' ? 'Unknown' : codec.avg_frame_rate}</div>
                                <br />
                              </div>
                            ) : (
                              <div>
                                <div>Audio:</div>
                                <br />
                                <div>Codec: {codec.codec_long_name}</div>
                                <br />
                              </div>
                            )}
                          </div>
                        ))}
                      </div>
                    ) : (
                      <div className="px-2">
                        <div>Error: {stream.stderr}</div>
                      </div>
                    )}
                  </div>
                ))}
              </div>
            ) : (
              <ActivityIndicator />
            )}
          </div>
          <div className="p-2 flex justify-start flex-row-reverse space-x-2">
            <Button className="ml-2" onClick={() => onCopyFfprobe()} type="text">
@@ -114,10 +175,16 @@ export default function System() {

      {state.showVainfo && (
        <Dialog>
          <div className="p-4">
          <div className="p-4 overflow-scroll whitespace-pre-line">
            <Heading size="lg">Vainfo Output</Heading>
            {state.vainfo != '' ? (
              <p className="mb-2 max-h-96 overflow-scroll">{state.vainfo}</p>
              <div className="mb-2 max-h-96 whitespace-pre-line">
                <div className="">Return Code: {state.vainfo.return_code}</div>
                <br />
                <div className="">Process {state.vainfo.return_code == 0 ? 'Output' : 'Error'}:</div>
                <br />
                <div>{state.vainfo.return_code == 0 ? state.vainfo.stdout : state.vainfo.stderr}</div>
              </div>
            ) : (
              <ActivityIndicator />
            )}
@@ -187,8 +254,9 @@ export default function System() {
          <div className="p-2">
            {gpu_usages[gpu]['gpu'] == -1 ? (
              <div className="p-4">
                There was an error getting usage stats. Either your GPU does not support this or Frigate does
                not have proper access.
                There was an error getting usage stats. This does not mean hardware acceleration is not working.
                Either your GPU does not support this or Frigate does not have proper access to get statistics.
                This is expected for the Home Assistant addon.
              </div>
            ) : (
              <Table className="w-full">
@@ -255,11 +323,15 @@ export default function System() {

                    {(() => {
                      if (cameras[camera]['pid'] && cameras[camera]['detection_enabled'] == 1)
                        return <Td>{cameras[camera]['detection_fps']} ({cameras[camera]['skipped_fps']} skipped)</Td>
                        return (
                          <Td>
                            {cameras[camera]['detection_fps']} ({cameras[camera]['skipped_fps']} skipped)
                          </Td>
                        );
                      else if (cameras[camera]['pid'] && cameras[camera]['detection_enabled'] == 0)
                        return <Td>disabled</Td>
                        return <Td>disabled</Td>;

                      return <Td>- </Td>
                      return <Td>- </Td>;
                    })()}

                    <Td>{cpu_usages[cameras[camera]['pid']]?.['cpu'] || '- '}%</Td>

@@ -1,7 +1,8 @@
import strftime from 'strftime';
import { fromUnixTime, intervalToDuration, formatDuration } from 'date-fns';
export const longToDate = (long: number): Date => new Date(long * 1000);
export const epochToLong = (date: number): number => date / 1000;
export const dateToLong = (date: Date): number => epochToLong(date.getTime());
import { fromUnixTime, intervalToDuration, formatDuration } from 'date-fns';

const getDateTimeYesterday = (dateTime: Date): Date => {
  const twentyFourHoursInMilliseconds = 24 * 60 * 60 * 1000;
@@ -17,28 +18,54 @@ export const getNowYesterdayInLong = (): number => {
};

/**
 * This function takes in a unix timestamp, locale, timezone,
 * and returns a dateTime string.
 * If unixTimestamp is not provided, it returns 'Invalid time'
 * @param unixTimestamp: number
 * @param locale: string
 * @param timezone: string
 * @returns string - dateTime or 'Invalid time' if unixTimestamp is not provided
 * This function takes in a Unix timestamp, configuration options for date/time display, and an optional strftime format string,
 * and returns a formatted date/time string.
 *
 * If the Unix timestamp is not provided, it returns "Invalid time".
 *
 * The configuration options determine how the date and time are formatted.
 * The `timezone` option allows you to specify a specific timezone for the output, otherwise the user's browser timezone will be used.
 * The `use12hour` option allows you to display time in a 12-hour format if true, and 24-hour format if false.
 * The `dateStyle` and `timeStyle` options allow you to specify pre-defined formats for displaying the date and time.
 * The `strftime_fmt` option allows you to specify a custom format using the strftime syntax.
 *
 * If both `strftime_fmt` and `dateStyle`/`timeStyle` are provided, `strftime_fmt` takes precedence.
 *
 * @param unixTimestamp The Unix timestamp to format
 * @param config An object containing the configuration options for date/time display
 * @returns The formatted date/time string, or "Invalid time" if the Unix timestamp is not provided or invalid.
 */
export const formatUnixTimestampToDateTime = (unixTimestamp: number, locale: string, timezone: string): string => {
interface DateTimeStyle {
  timezone: string;
  time_format: 'browser' | '12hour' | '24hour';
  date_style: 'full' | 'long' | 'medium' | 'short';
  time_style: 'full' | 'long' | 'medium' | 'short';
  strftime_fmt: string;
}

export const formatUnixTimestampToDateTime = (unixTimestamp: number, config: DateTimeStyle): string => {
  const { timezone, time_format, date_style, time_style, strftime_fmt } = config;
  const locale = window.navigator?.language || 'en-us';

  if (isNaN(unixTimestamp)) {
    return 'Invalid time';
  }

  try {
    const date = new Date(unixTimestamp * 1000);

    // use strftime_fmt if defined in config file
    if (strftime_fmt) {
      const strftime_locale = strftime.timezone(getUTCOffset(date, timezone)).localizeByIdentifier(locale);
      return strftime_locale(strftime_fmt, date);
    }

    // else use Intl.DateTimeFormat
    const formatter = new Intl.DateTimeFormat(locale, {
      day: '2-digit',
      month: '2-digit',
      year: 'numeric',
      hour: '2-digit',
      minute: '2-digit',
      second: '2-digit',
      timeZone: timezone,
      dateStyle: date_style,
      timeStyle: time_style,
      timeZone: timezone || Intl.DateTimeFormat().resolvedOptions().timeZone,
      hour12: time_format !== 'browser' ? time_format == '12hour' : undefined,
    });
    return formatter.format(date);
  } catch (error) {
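A hedged usage sketch of the new signature, assuming `formatUnixTimestampToDateTime` and the `DateTimeStyle` interface above were exported; it also shows `strftime_fmt` taking precedence over `date_style`/`time_style` as documented:

// Hypothetical usage; exact output depends on the runtime locale.
const config: DateTimeStyle = {
  timezone: 'America/New_York',
  time_format: '12hour',
  date_style: 'medium',
  time_style: 'medium',
  strftime_fmt: '', // empty => falls through to Intl.DateTimeFormat
};

formatUnixTimestampToDateTime(1680000000, config);
// e.g. "Mar 28, 2023, 6:40:00 AM" (1680000000 is 10:40 UTC; New York is UTC-4 in March)

formatUnixTimestampToDateTime(1680000000, { ...config, strftime_fmt: '%Y-%m-%d %H:%M' });
// e.g. "2023-03-28 06:40" — the strftime format wins over date_style/time_style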
@@ -87,3 +114,18 @@ export const getDurationFromTimestamps = (start_time: number, end_time: number |
}
return duration;
};

/**
 * Adapted from https://stackoverflow.com/a/29268535 this takes a timezone string and
 * returns the offset of that timezone from UTC in minutes.
 * @param timezone string representation of the timezone the user is requesting
 * @returns number of minutes offset from UTC
 */
const getUTCOffset = (date: Date, timezone: string): number => {
  const utcDate = new Date(date.getTime() - (date.getTimezoneOffset() * 60 * 1000));
  // locale of en-CA is required for proper locale format
  let iso = utcDate.toLocaleString('en-CA', { timeZone: timezone, hour12: false }).replace(', ', 'T');
  iso += '.' + utcDate.getMilliseconds().toString().padStart(3, '0');
  const target = new Date(iso + 'Z');
  return (target.getTime() - utcDate.getTime()) / 60 / 1000;
}
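The helper renders the instant as wall-clock time in the target timezone (via `toLocaleString`), reparses that as if it were UTC, and takes the difference in minutes. A rough sanity check, assuming `getUTCOffset` were exported for testing (it is module-private above):

// Hypothetical check; values assume standard IANA timezone data.
const d = new Date('2023-03-28T10:40:00Z');
getUTCOffset(d, 'America/New_York'); // -240 (EDT is UTC-4)
getUTCOffset(d, 'UTC');              // 0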
|