forked from GitHub/frigate

Compare commits: v0.15.0-be ... v0.15.0-be (83 commits)

| SHA1 |
|---|
| e76f4e9bd9 |
| 0df091f387 |
| 66277fbb6c |
| a67ff3843a |
| 9ae839ad72 |
| 66f71aecf7 |
| 0b203a3673 |
| 26c3f9f914 |
| 474c248c9d |
| 5b1b6b5be0 |
| 45e9030358 |
| f9c1600f0d |
| ad85f8882b |
| 206ed06905 |
| e407ba47c2 |
| 7fdf42a56f |
| 4eea541352 |
| ed9c67804a |
| 9c20cd5f7b |
| 6c86827d3a |
| d2b2f3d54d |
| 64b3397f8e |
| 0829517b72 |
| c1bfc1df67 |
| 96c0c43dc8 |
| a68c7f4ef8 |
| 7c474e6827 |
| 143bab87f1 |
| 580f35112e |
| 3249ffb273 |
| 7bae9463b2 |
| ae30ac6e3c |
| 46ed520886 |
| ace02a6dfa |
| 0d59754be2 |
| 15bd26c9b1 |
| bc371acb3e |
| 2eb5fbf112 |
| fc0fb158d5 |
| 404807c697 |
| 29ea7c53f2 |
| 1fc4af9c86 |
| ac762762c3 |
| 553676aade |
| a13b9815f6 |
| 156e7cc628 |
| 959ca0f412 |
| 9755fa0537 |
| 77ec86d31a |
| 189d4b459f |
| 44f40966e7 |
| 7d3313e732 |
| 591b50dfa7 |
| 27ef661fec |
| d7935abc14 |
| 11068aa9d0 |
| 1234003527 |
| e5ebf938f6 |
| 8c2c07fd18 |
| 9e1a50c3be |
| ac8ddada0b |
| 885485da70 |
| bb4e863e87 |
| c7a4220d65 |
| 03dd9b2d42 |
| 89ca085b94 |
| fffd9defea |
| d10fea6012 |
| ab26aee8b2 |
| bb80a7b2ee |
| e4a6b29279 |
| d12c7809dd |
| 357ce0382e |
| 73da3d9b20 |
| e67b7a6d5e |
| 4e25bebdd0 |
| abd22d2566 |
| 8aeb597780 |
| af844ea9d5 |
| 51509760e3 |
| f86957e5e1 |
| 2a15b95f18 |
| 039ab1ccd7 |
@@ -12,6 +12,7 @@ argmax
 argmin
 argpartition
 ascontiguousarray
+astype
 authelia
 authentik
 autodetected
@@ -195,6 +196,7 @@ poweroff
 preexec
 probesize
 protobuf
+pstate
 psutil
 pubkey
 putenv
@@ -278,6 +280,7 @@ uvicorn
 vaapi
 vainfo
 variations
+vbios
 vconcat
 vitb
 vstream
@@ -3,10 +3,12 @@
 set -euxo pipefail
 
 # Cleanup the old github host key
-sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
-# Add new github host key
-curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
-    sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
+if [[ -f ~/.ssh/known_hosts ]]; then
+    sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
+    # Add new github host key
+    curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
+        sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
+fi
 
 # Frigate normal container runs as root, so it have permission to create
 # the folders. But the devcontainer runs as the host user, so we need to
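After this change the devcontainer setup only rewrites `~/.ssh/known_hosts` when the file exists. A hedged way to confirm the refreshed keys are accepted (assuming SSH access to GitHub is already configured):

```bash
# Should authenticate without a host-key warning; GitHub always exits
# non-zero for ssh -T, so mask the status.
ssh -o StrictHostKeyChecking=yes -T git@github.com || true
```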
.github/pull_request_template.md (vendored, 1 change)
@@ -13,6 +13,7 @@
 - [ ] New feature
 - [ ] Breaking change (fix/feature causing existing functionality to break)
 - [ ] Code quality improvements to existing code
+- [ ] Documentation Update
 
 ## Additional information
 
@@ -23,7 +23,7 @@ services:
 #          count: 1
 #          capabilities: [gpu]
     environment:
-      YOLO_MODELS: yolov7-320
+      YOLO_MODELS: ""
     devices:
       - /dev/bus/usb:/dev/bus/usb
       # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
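With `YOLO_MODELS` now defaulting to empty, TensorRT model generation becomes opt-in. A sketch of opting back in (the model names are the stock yolov7 variants used elsewhere in this diff):

```bash
# Request generation of specific TensorRT models at container startup
YOLO_MODELS=yolov7-320,yolov7x-640 docker compose up -d frigate
```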
@@ -16,89 +16,25 @@ RUN mkdir /h8l-wheels
 # Build the wheels
 RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt
 
-# Build HailoRT and create wheel
-FROM wheels AS build-hailort
+FROM wget AS hailort
 ARG TARGETARCH
-SHELL ["/bin/bash", "-c"]
-
-# Install necessary APT packages
-RUN apt-get -qq update \
-    && apt-get -qq install -y \
-    apt-transport-https \
-    gnupg \
-    wget \
-    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
-    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
-    gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
-    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
-    tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
-    && apt-get -qq update \
-    && apt-get -qq install -y \
-    python3.9 \
-    python3.9-dev \
-    build-essential cmake git \
-    && rm -rf /var/lib/apt/lists/*
-
-# Extract Python version and set environment variables
-RUN PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2) && \
-    PYTHON_VERSION_NO_DOT=$(echo $PYTHON_VERSION | sed 's/\.//') && \
-    echo "PYTHON_VERSION=$PYTHON_VERSION" > /etc/environment && \
-    echo "PYTHON_VERSION_NO_DOT=$PYTHON_VERSION_NO_DOT" >> /etc/environment
-
-# Clone and build HailoRT
-RUN . /etc/environment && \
-    git clone https://github.com/hailo-ai/hailort.git /opt/hailort && \
-    cd /opt/hailort && \
-    git checkout v4.18.0 && \
-    cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_PYBIND=1 -DPYBIND11_PYTHON_VERSION=${PYTHON_VERSION} && \
-    cmake --build build --config release --target libhailort && \
-    cmake --build build --config release --target _pyhailort && \
-    cp build/hailort/libhailort/bindings/python/src/_pyhailort.cpython-${PYTHON_VERSION_NO_DOT}-$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ && \
-    cp build/hailort/libhailort/src/libhailort.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
-
-RUN ls -ahl /opt/hailort/build/hailort/libhailort/src/
-RUN ls -ahl /opt/hailort/hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/
-
-# Remove the existing setup.py if it exists in the target directory
-RUN rm -f /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
-
-# Copy generate_wheel_conf.py and setup.py
-COPY docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
-COPY docker/hailo8l/pyhailort_build_scripts/setup.py /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py
-
-# Run the generate_wheel_conf.py script
-RUN python3 /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py
-
-# Create a wheel file using pip3 wheel
-RUN cd /opt/hailort/hailort/libhailort/bindings/python/platform && \
-    python3 setup.py bdist_wheel --dist-dir /hailo-wheels
+RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
+    /deps/install_hailort.sh
 
 # Use deps as the base image
 FROM deps AS h8l-frigate
 
 # Copy the wheels from the wheels stage
 COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
-COPY --from=build-hailort /hailo-wheels /deps/hailo-wheels
-COPY --from=build-hailort /etc/environment /etc/environment
-RUN CC=$(python3 -c "import sysconfig; import shlex; cc = sysconfig.get_config_var('CC'); cc_cmd = shlex.split(cc)[0]; print(cc_cmd[:-4] if cc_cmd.endswith('-gcc') else cc_cmd)") && \
-    echo "CC=$CC" >> /etc/environment
+COPY --from=hailort /hailo-wheels /deps/hailo-wheels
+COPY --from=hailort /rootfs/ /
 
 # Install the wheels
 RUN pip3 install -U /deps/h8l-wheels/*.whl
 RUN pip3 install -U /deps/hailo-wheels/*.whl
 
-RUN . /etc/environment && \
-    mv /usr/local/lib/python${PYTHON_VERSION}/dist-packages/hailo_platform/pyhailort/libhailort.so /usr/lib/${CC} && \
-    cd /usr/lib/${CC}/ && \
-    ln -s libhailort.so libhailort.so.4.18.0
-
 # Copy base files from the rootfs stage
 COPY --from=rootfs / /
 
-# Set environment variables for Hailo SDK
-ENV PATH="/opt/hailort/bin:${PATH}"
-ENV LD_LIBRARY_PATH="/usr/lib/$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu:${LD_LIBRARY_PATH}"
-
 # Set workdir
 WORKDIR /opt/frigate/
@@ -1,3 +1,9 @@
+target wget {
+  dockerfile = "docker/main/Dockerfile"
+  platforms = ["linux/arm64","linux/amd64"]
+  target = "wget"
+}
+
 target wheels {
   dockerfile = "docker/main/Dockerfile"
   platforms = ["linux/arm64","linux/amd64"]
@@ -19,6 +25,7 @@ target rootfs {
 target h8l {
   dockerfile = "docker/hailo8l/Dockerfile"
   contexts = {
+    wget = "target:wget"
     wheels = "target:wheels"
     deps = "target:deps"
     rootfs = "target:rootfs"
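The new `wget` bake target lets the Hailo image pull prebuilt HailoRT artifacts instead of compiling them from source. A hedged example of building the `h8l` target defined above (the exact bake invocation may differ in your workflow):

```bash
# Build the Hailo8l variant; the contexts wire wget/wheels/deps/rootfs together
docker buildx bake -f docker/hailo8l/h8l.hcl h8l
```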
docker/hailo8l/install_hailort.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+hailo_version="4.19.0"
+
+if [[ "${TARGETARCH}" == "amd64" ]]; then
+    arch="x86_64"
+elif [[ "${TARGETARCH}" == "arm64" ]]; then
+    arch="aarch64"
+fi
+
+mkdir -p /rootfs
+
+wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
+    tar -C /rootfs/ -xzf -
+
+mkdir -p /hailo-wheels
+
+wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"
+
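The script relies on `TARGETARCH`, which Docker buildx injects automatically. To exercise it outside a build (an assumption; it is normally only run via the `RUN --mount` in the Dockerfile above):

```bash
# Simulate a buildx environment for a local dry run
TARGETARCH=amd64 bash docker/hailo8l/install_hailort.sh
```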
@@ -1,67 +0,0 @@
-import json
-import os
-import platform
-import sys
-import sysconfig
-
-
-def extract_toolchain_info(compiler):
-    # Remove the "-gcc" or "-g++" suffix if present
-    if compiler.endswith("-gcc") or compiler.endswith("-g++"):
-        compiler = compiler.rsplit("-", 1)[0]
-
-    # Extract the toolchain and ABI part (e.g., "gnu")
-    toolchain_parts = compiler.split("-")
-    abi_conventions = next(
-        (part for part in toolchain_parts if part in ["gnu", "musl", "eabi", "uclibc"]),
-        "",
-    )
-
-    return abi_conventions
-
-
-def generate_wheel_conf():
-    conf_file_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
-    )
-
-    # Extract current system and Python version information
-    py_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
-    arch = platform.machine()
-    system = platform.system().lower()
-    libc_version = platform.libc_ver()[1]
-
-    # Get the compiler information
-    compiler = sysconfig.get_config_var("CC")
-    abi_conventions = extract_toolchain_info(compiler)
-
-    # Create the new configuration data
-    new_conf_data = {
-        "py_version": py_version,
-        "arch": arch,
-        "system": system,
-        "libc_version": libc_version,
-        "abi": abi_conventions,
-        "extension": {
-            "posix": "so",
-            "nt": "pyd",  # Windows
-        }[os.name],
-    }
-
-    # If the file exists, load the existing data
-    if os.path.isfile(conf_file_path):
-        with open(conf_file_path, "r") as conf_file:
-            conf_data = json.load(conf_file)
-        # Update the existing data with the new data
-        conf_data.update(new_conf_data)
-    else:
-        # If the file does not exist, use the new data
-        conf_data = new_conf_data
-
-    # Write the updated data to the file
-    with open(conf_file_path, "w") as conf_file:
-        json.dump(conf_data, conf_file, indent=4)
-
-
-if __name__ == "__main__":
-    generate_wheel_conf()
@@ -1,111 +0,0 @@
-import json
-import os
-
-from setuptools import find_packages, setup
-from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
-
-
-class NonPurePythonBDistWheel(orig_bdist_wheel):
-    """Makes the wheel platform-dependent so it can be based on the _pyhailort architecture"""
-
-    def finalize_options(self):
-        orig_bdist_wheel.finalize_options(self)
-        self.root_is_pure = False
-
-
-def _get_hailort_lib_path():
-    lib_filename = "libhailort.so"
-    lib_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)),
-        f"hailo_platform/pyhailort/{lib_filename}",
-    )
-    if os.path.exists(lib_path):
-        print(f"Found libhailort shared library at: {lib_path}")
-    else:
-        print(f"Error: libhailort shared library not found at: {lib_path}")
-        raise FileNotFoundError(f"libhailort shared library not found at: {lib_path}")
-    return lib_path
-
-
-def _get_pyhailort_lib_path():
-    conf_file_path = os.path.join(
-        os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json"
-    )
-    if not os.path.isfile(conf_file_path):
-        raise FileNotFoundError(f"Configuration file not found: {conf_file_path}")
-
-    with open(conf_file_path, "r") as conf_file:
-        content = json.load(conf_file)
-        py_version = content["py_version"]
-        arch = content["arch"]
-        system = content["system"]
-        extension = content["extension"]
-        abi = content["abi"]
-
-        # Construct the filename directly
-        lib_filename = f"_pyhailort.cpython-{py_version.split('cp')[1]}-{arch}-{system}-{abi}.{extension}"
-        lib_path = os.path.join(
-            os.path.abspath(os.path.dirname(__file__)),
-            f"hailo_platform/pyhailort/{lib_filename}",
-        )
-
-        if os.path.exists(lib_path):
-            print(f"Found _pyhailort shared library at: {lib_path}")
-        else:
-            print(f"Error: _pyhailort shared library not found at: {lib_path}")
-            raise FileNotFoundError(
-                f"_pyhailort shared library not found at: {lib_path}"
-            )
-
-    return lib_path
-
-
-def _get_package_paths():
-    packages = []
-    pyhailort_lib = _get_pyhailort_lib_path()
-    hailort_lib = _get_hailort_lib_path()
-    if pyhailort_lib:
-        packages.append(pyhailort_lib)
-    if hailort_lib:
-        packages.append(hailort_lib)
-    packages.append(os.path.abspath("hailo_tutorials/notebooks/*"))
-    packages.append(os.path.abspath("hailo_tutorials/hefs/*"))
-    return packages
-
-
-if __name__ == "__main__":
-    setup(
-        author="Hailo team",
-        author_email="contact@hailo.ai",
-        cmdclass={
-            "bdist_wheel": NonPurePythonBDistWheel,
-        },
-        description="HailoRT",
-        entry_points={
-            "console_scripts": [
-                "hailo=hailo_platform.tools.hailocli.main:main",
-            ]
-        },
-        install_requires=[
-            "argcomplete",
-            "contextlib2",
-            "future",
-            "netaddr",
-            "netifaces",
-            "verboselogs",
-            "numpy==1.23.3",
-        ],
-        name="hailort",
-        package_data={
-            "hailo_platform": _get_package_paths(),
-        },
-        packages=find_packages(),
-        platforms=[
-            "linux_x86_64",
-            "linux_aarch64",
-            "win_amd64",
-        ],
-        url="https://hailo.ai/",
-        version="4.17.0",
-        zip_safe=False,
-    )
@@ -13,7 +13,7 @@ else
 fi
 
 # Clone the HailoRT driver repository
-git clone --depth 1 --branch v4.18.0 https://github.com/hailo-ai/hailort-drivers.git
+git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
 
 # Build and install the HailoRT driver
 cd hailort-drivers/linux/pcie
@@ -38,7 +38,7 @@ cd ../../
 if [ ! -d /lib/firmware/hailo ]; then
     sudo mkdir /lib/firmware/hailo
 fi
-sudo mv hailo8_fw.4.18.0.bin /lib/firmware/hailo/hailo8_fw.bin
+sudo mv hailo8_fw.*.bin /lib/firmware/hailo/hailo8_fw.bin
 
 # Install udev rules
 sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/
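The wildcard decouples the firmware install from the pinned driver version, so future bumps only need the `git clone` branch change above. For example:

```bash
# Whatever firmware version the drivers repo ships is picked up automatically
ls hailo8_fw.*.bin   # e.g. hailo8_fw.4.19.0.bin
```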
@@ -211,6 +211,9 @@ ENV TOKENIZERS_PARALLELISM=true
 # https://github.com/huggingface/transformers/issues/27214
 ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 
+# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
+ENV OPENCV_FFMPEG_LOGLEVEL=8
+
 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
 ENV LIBAVFORMAT_VERSION_MAJOR=60
 
@@ -87,8 +87,8 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
-        libmfx1 libmfxgen1 libvpl2
+        intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
+        libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
 
     rm -f /usr/share/keyrings/intel-graphics.gpg
     rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
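Pinning exact package versions guards against upstream repository updates silently changing the driver stack. If a pin ever fails to resolve, the published versions can be listed first (a sketch, run inside the image build environment after the repo is configured):

```bash
# Show candidate versions available in the configured Intel repository
apt-cache madison intel-opencl-icd libmfx1 libvpl2
```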
@@ -1,5 +1,7 @@
 click == 8.1.*
 # FastAPI
+aiohttp == 3.11.2
+starlette == 0.41.2
 starlette-context == 0.3.6
 fastapi == 0.115.*
 uvicorn == 0.30.*
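Pinning `aiohttp` and `starlette` alongside the existing FastAPI pins keeps transitive resolution deterministic. A hedged sanity check that the set resolves together (requires pip 22.2+ for `--dry-run`):

```bash
pip3 install --dry-run "aiohttp==3.11.2" "starlette==0.41.2" \
    "starlette-context==0.3.6" "fastapi==0.115.*" "uvicorn==0.30.*"
```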
@@ -165,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False):
     birdseye: dict[str, any] = config.get("birdseye")
 
     input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
-    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
+    ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}"
 
     if go2rtc_config.get("streams"):
         go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
@@ -10,8 +10,8 @@ ARG DEBIAN_FRONTEND
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y --no-install-recommends \
    python3.9 python3.9-dev \
    wget build-essential cmake git \
     && rm -rf /var/lib/apt/lists/*
 
 # Ensure python3 defaults to python3.9
@@ -41,7 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
 
 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
+ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
+
+RUN pip3 uninstall -y onnxruntime-openvino \
+    && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
+    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
 
 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND
@@ -25,7 +25,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
 COPY docker/tensorrt/detector/rootfs/ /
-ENV YOLO_MODELS="yolov7-320"
+ENV YOLO_MODELS=""
 
 HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
     CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
@@ -11,6 +11,7 @@ set -o errexit -o nounset -o pipefail
 MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
 TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
 OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
+YOLO_MODELS=${YOLO_MODELS:-""}
 
 # Create output folder
 mkdir -p ${OUTPUT_FOLDER}
@@ -19,6 +20,11 @@ FIRST_MODEL=true
 MODEL_DOWNLOAD=""
 MODEL_CONVERT=""
 
+if [ -z "$YOLO_MODELS" ]; then
+    echo "tensorrt model preparation disabled"
+    exit 0
+fi
+
 for model in ${YOLO_MODELS//,/ }
 do
     # Remove old link in case path/version changed
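With the guard in place, an empty `YOLO_MODELS` short-circuits model preparation instead of looping over nothing. A hypothetical standalone invocation (the script name here is illustrative; it normally runs as an s6 service inside the container):

```bash
YOLO_MODELS="" ./trt-model-prepare.sh
# -> tensorrt model preparation disabled (exit 0)
```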
@@ -1 +1 @@
-cuda-python == 11.7; platform_machine == 'aarch64'
+cuda-python == 11.7; platform_machine == 'aarch64'
@@ -181,7 +181,7 @@ go2rtc:
       - rtspx://192.168.1.1:7441/abcdefghijk
 ```
 
-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp)
 
 In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
 
@@ -109,7 +109,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
 | Reolink E1 Zoom | ✅ | ❌ | |
 | Reolink RLC-823A 16x | ✅ | ❌ | |
 | Speco O8P32X | ✅ | ❌ | |
-| Sunba 405-D20X | ✅ | ❌ | |
+| Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on original and 4K models. All models are suspected incompatible. |
 | Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
 | Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
 | Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |
@@ -3,9 +3,13 @@ id: genai
 title: Generative AI
 ---
 
-Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects.
+Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
 
-Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
+:::info
+
+Semantic Search must be enabled to use Generative AI.
+
+:::
 
 ## Configuration
 
@@ -31,15 +35,15 @@ cameras:
 
 :::warning
 
-Using Ollama on CPU is not recommended, high inference times make using generative AI impractical.
+Using Ollama on CPU is not recommended; high inference times make using Generative AI impractical.
 
 :::
 
 [Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card or an Apple silicon Mac for best performance.
 
-Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [docker container](https://hub.docker.com/r/ollama/ollama) available.
+Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
 
-Parallel requests also come with some caveats. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
+Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
 
 ### Supported Models
 
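A sketch of applying those variables when running Ollama in Docker (the queue and loaded-model limits here are illustrative, not recommendations):

```bash
docker run -d --gpus=all \
  -e OLLAMA_NUM_PARALLEL=1 \
  -e OLLAMA_MAX_QUEUE=64 \
  -e OLLAMA_MAX_LOADED_MODELS=1 \
  -v ollama:/root/.ollama -p 11434:11434 \
  --name ollama ollama/ollama
```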
@@ -138,6 +142,10 @@ Frigate's thumbnail search excels at identifying specific details about tracked
 
 While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context.
 
+### Using GenAI for notifications
+
+Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
+
 ## Custom Prompts
 
 Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
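A minimal way to watch those payloads from a shell, assuming `mosquitto-clients` and `jq` are installed and the broker runs on localhost:

```bash
mosquitto_sub -h localhost -t frigate/tracked_object_update | while read -r payload; do
  event_id=$(echo "$payload" | jq -r '.event_id')
  # Pull the full tracked object record; host and port are assumptions
  curl -s "http://frigate_ip:5000/api/events/${event_id}" | jq '.'
done
```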
@@ -168,7 +176,7 @@ genai:
 
 Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
 
-Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction.
+Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description, as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
 
 ```yaml
 cameras:
@@ -22,14 +22,14 @@ Frigate supports multiple different detectors that work on different types of ha
 - [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
 
 **Nvidia**
-- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
-- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
+- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
+- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.
 
 **Rockchip**
 - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
 
 **For Testing**
 - [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
 
 :::
 
@@ -223,7 +223,7 @@ The model used for TensorRT must be preprocessed on the same hardware platform t
 
 The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
 
-By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
+By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
 
 If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
 
@@ -264,7 +264,7 @@ An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yol
 ```yml
 frigate:
   environment:
-    - YOLO_MODELS=yolov4-608,yolov7x-640
+    - YOLO_MODELS=yolov7-320,yolov7x-640
     - USE_FP16=false
 ```
 
@@ -415,6 +415,24 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
 
 ONNX is an open format for building machine learning models; Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.
 
+:::info
+
+If the correct build is used for your GPU then the GPU will be detected and used automatically.
+
+- **AMD**
+
+  - ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.
+
+- **Intel**
+
+  - OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.
+
+- **Nvidia**
+  - Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
+  - Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
+
+:::
+
 :::tip
 
 When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
@@ -457,6 +475,7 @@ model:
   width: 320 # <--- should match whatever was set in notebook
   height: 320 # <--- should match whatever was set in notebook
   input_pixel_format: bgr
+  input_tensor: nchw
   path: /config/yolo_nas_s.onnx
   labelmap_path: /labelmap/coco-80.txt
 ```
@@ -5,7 +5,7 @@ title: Available Objects
 
 import labels from "../../../labelmap.txt";
 
-Frigate includes the object models listed below from the Google Coral test data.
+Frigate includes the object labels listed below from the Google Coral test data.
 
 Please note:
 
@@ -548,10 +548,12 @@ genai:
 # Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
 go2rtc:
 
-# Optional: jsmpeg stream configuration for WebUI
+# Optional: Live stream configuration for WebUI.
+# NOTE: Can be overridden at the camera level
 live:
-  # Optional: Set the name of the stream that should be used for live view
-  # in frigate WebUI. (default: name of camera)
+  # Optional: Set the name of the stream configured in go2rtc
+  # that should be used for live view in frigate WebUI. (default: name of camera)
+  # NOTE: In most cases this should be set at the camera level only.
   stream_name: camera_name
   # Optional: Set the height of the jsmpeg stream. (default: 720)
   # This must be less than or equal to the height of the detect stream. Lower resolutions
@@ -7,7 +7,7 @@ title: Restream
 
 Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
 
-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration) for more advanced configurations and features.
+Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.
 
 :::note
 
@@ -134,7 +134,7 @@ cameras:
 
 ## Advanced Restream Configurations
 
-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
 
 NOTE: The output will need to be passed with two curly braces `{{output}}`
 
@@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
 
 ## Configuration
 
-Semantic search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
+Semantic Search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
 
 ```yaml
 semantic_search:
@@ -41,13 +41,7 @@ The vision model is able to embed both images and text into the same vector spac
 
 The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
 
-Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:
-
-:::tip
-
-The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
-
-:::
+Differently weighted CLIP models are available and can be selected by setting the `model_size` config option as `small` or `large`:
 
 ```yaml
 semantic_search:
@@ -56,11 +50,41 @@ semantic_search:
 ```
 
 - Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
-- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
+- Configuring the `small` model employs a quantized version of the model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
 
+### GPU Acceleration
+
+The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.
+
+```yaml
+semantic_search:
+  enabled: True
+  model_size: large
+```
+
+:::info
+
+If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
+
+**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
+
+- **AMD**
+
+  - ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
+
+- **Intel**
+
+  - OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
+
+- **Nvidia**
+  - Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
+  - Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
+
+:::
+
 ## Usage and Best Practices
 
-1. Semantic search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and semantic search for the best results.
+1. Semantic Search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and Semantic Search for the best results.
 2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
 3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
 4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
@@ -81,15 +81,15 @@ You can calculate the **minimum** shm size for each camera with the following fo
|
|||||||
|
|
||||||
```console
|
```console
|
||||||
# Replace <width> and <height>
|
# Replace <width> and <height>
|
||||||
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 10 + 270480) / 1048576))'
|
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'
|
||||||
|
|
||||||
# Example for 1280x720
|
# Example for 1280x720, including logs
|
||||||
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 10 + 270480) / 1048576))'
|
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576)) + 40'
|
||||||
13.44MB
|
46.63MB
|
||||||
|
|
||||||
# Example for eight cameras detecting at 1280x720, including logs
|
# Example for eight cameras detecting at 1280x720, including logs
|
||||||
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 10 + 270480) / 1048576) * 8 + 40))'
|
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
|
||||||
136.99MB
|
253MB
|
||||||
```
|
```
|
||||||
|
|
||||||
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
||||||
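For reference, a minimal sketch of this sizing rule as a standalone helper (the 20-frame buffer and 40MB log allowance are taken from the calculations above; the function name is illustrative):

```python
# Sketch of the shm sizing rule used above; helper name is illustrative.
def min_shm_mb(cameras: list[tuple[int, int]], log_overhead_mb: int = 40) -> float:
    """Estimate minimum /dev/shm in MB for YUV420 frames (1.5 bytes/px, 20 frames)."""
    per_camera = [(w * h * 1.5 * 20 + 270480) / 1048576 for w, h in cameras]
    return round(sum(per_camera) + log_overhead_mb, 2)

print(min_shm_mb([(1280, 720)] * 8))  # ~253.0, matching the example above
```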
@@ -194,7 +194,7 @@ services:
     privileged: true # this may not be necessary for all setups
     restart: unless-stopped
     image: ghcr.io/blakeblackshear/frigate:stable
-    shm_size: "64mb" # update for your cameras based on calculation above
+    shm_size: "512mb" # update for your cameras based on calculation above
     devices:
       - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
       - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
@@ -13,7 +13,15 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
 
 # Setup a go2rtc stream
 
-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. For the best experience, you should set the stream name under go2rtc to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.
 
+:::tip
+
+For the best experience, you should set the stream name under `go2rtc` to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera.
+
+See [the live view docs](../configuration/live.md#setting-stream-for-live-ui) for more information.
+
+:::
+
 ```yaml
 go2rtc:
@@ -39,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
 
 - Check Video Codec:
   - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
-  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#codecs-madness) in go2rtc documentation.
-  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
+  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation.
+  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
 ```yaml
 go2rtc:
   streams:
@@ -306,7 +306,9 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
 
 ### Step 7: Complete config
 
-At this point you have a complete config with basic functionality. You can see the [full config reference](../configuration/reference.md) for a complete list of configuration options.
+At this point you have a complete config with basic functionality.
+- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common setups.
+- View the [full config reference](../configuration/reference.md) for a complete list of configuration options.
 
 ### Follow up
 
@@ -94,6 +94,18 @@ Message published for each changed tracked object. The first message is publishe
 }
 ```
 
+### `frigate/tracked_object_update`
+
+Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
+
+```json
+{
+  "type": "description",
+  "id": "1607123955.475377-mxklsc",
+  "description": "The car is a red sedan moving away from the camera."
+}
+```
+
 ### `frigate/reviews`
 
 Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended a final `end` message is published.
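A minimal sketch of consuming the new topic with paho-mqtt (the broker address is an assumption for illustration; the payload shape follows the documented example above):

```python
import json

import paho.mqtt.client as mqtt  # assumes the paho-mqtt package is installed


def on_message(client, userdata, message):
    # Payload matches the documented shape: type, id, description.
    update = json.loads(message.payload)
    if update.get("type") == "description":
        print(f"{update['id']}: {update['description']}")


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # assumed broker address
client.subscribe("frigate/tracked_object_update")
client.loop_forever()
```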
@@ -5,7 +5,7 @@ title: Requesting your first model
 
 ## Step 1: Upload and annotate your images
 
-Before requesting your first model, you will need to upload at least 10 images to Frigate+. But for the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
+Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify, the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
 
 It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
 
@@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your
 
 ## Step 2: Submit a model request
 
-Once you have an initial set of verified images, you can request a model on the Models page. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
-
+Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
+
 
 ## Step 3: Set your model id in the config
 
@@ -3,7 +3,7 @@ id: improving_model
 title: Improving your model
 ---
 
-You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. Because a limited number of users submitted images to Frigate+ prior to this launch, you may need to submit several hundred images per camera to see good results. With all the new images now being submitted, future base models will improve as more and more users (including you) submit examples to Frigate+. Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
+You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
 
 - **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
 - **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
@@ -36,18 +36,17 @@ Misidentified objects should have a correct label added. For example, if a perso
 
 ## Shortcuts for a faster workflow
 
-|Shortcut Key|Description|
-|-----|--------|
-|`?`|Show all keyboard shortcuts|
-|`w`|Add box|
-|`d`|Toggle difficult|
-|`s`|Switch to the next label|
-|`tab`|Select next largest box|
-|`del`|Delete current box|
-|`esc`|Deselect/Cancel|
-|`← ↑ → ↓`|Move box|
-|`Shift + ← ↑ → ↓`|Resize box|
-|`-`|Zoom out|
-|`=`|Zoom in|
-|`f`|Hide/show all but current box|
-|`spacebar`|Verify and save|
+| Shortcut Key      | Description                   |
+| ----------------- | ----------------------------- |
+| `?`               | Show all keyboard shortcuts   |
+| `w`               | Add box                       |
+| `d`               | Toggle difficult              |
+| `s`               | Switch to the next label      |
+| `tab`             | Select next largest box       |
+| `del`             | Delete current box            |
+| `esc`             | Deselect/Cancel               |
+| `← ↑ → ↓`         | Move box                      |
+| `Shift + ← ↑ → ↓` | Resize box                    |
+| `scrollwheel`     | Zoom in/out                   |
+| `f`               | Hide/show all but current box |
+| `spacebar`        | Verify and save               |
@@ -15,17 +15,36 @@ With a subscription, 12 model trainings per year are included. If you cancel you
 
 Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
 
+## Available model types
+
+There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
+
+Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
+
+| Model Type  | Description                                                                                                                                   |
+| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
+| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs.                            |
+| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
+
 ## Supported detector types
 
+Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors.
+
 :::warning
 
-Frigate+ models are not supported for TensorRT or OpenVino yet.
+Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
 
 :::
 
-Currently, Frigate+ models only support CPU (`cpu`) and Coral (`edgetpu`) models. OpenVino is next in line to gain support.
+| Hardware                                                                                                                      | Recommended Detector Type | Recommended Model Type |
+| ------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | ---------------------- |
+| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended)                                                        | `cpu`                     | `mobiledet`            |
+| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector)                                              | `edgetpu`                 | `mobiledet`            |
+| [Intel](/configuration/object_detectors.md#openvino-detector)                                                                 | `openvino`                | `yolonas`              |
+| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\*                    | `onnx`                    | `yolonas`              |
+| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\*  | `rocm`                    | `yolonas`              |
 
-The models are created using the same MobileDet architecture as the default model. Additional architectures will be added in future releases as needed.
+_\* Requires Frigate 0.15_
 
 ## Available label types
 
@@ -49,7 +49,10 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n
 
 ## PCIe Coral Not Detected
 
-The most common reason for the PCIe coral not being detected is that the driver has not been installed. See [the coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) for how to install the driver for the PCIe based coral.
+The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on the OS and kernel being run.
+
+- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
+- For Ubuntu 22.04+, https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
 
 ## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU
 
@@ -26,7 +26,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'link',
      label: 'Go2RTC Configuration Reference',
-      href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration',
+      href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
    } as PropSidebarItemLink,
   ],
   Detectors: [
90  docs/static/frigate-api.yaml  (vendored)
@@ -7,7 +7,7 @@ info:
 
 servers:
   - url: https://demo.frigate.video/api
-  - url: http://localhost:5001/
+  - url: http://localhost:5001/api
 
 paths:
   /auth:
@@ -296,7 +296,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/ReviewSetMultipleReviewedBody'
+              $ref: '#/components/schemas/ReviewModifyMultipleBody'
       responses:
         '200':
           description: Successful Response
@@ -321,7 +321,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/ReviewDeleteMultipleReviewsBody'
+              $ref: '#/components/schemas/ReviewModifyMultipleBody'
       responses:
         '200':
           description: Successful Response
@@ -1141,11 +1141,11 @@ paths:
            type: number
          title: End Time
       requestBody:
+        required: true
         content:
           application/json:
             schema:
-              type: object
-              title: Body
+              $ref: '#/components/schemas/ExportRecordingsBody'
       responses:
         '200':
           description: Successful Response
@@ -1408,6 +1408,14 @@ paths:
            - type: number
            - type: 'null'
          title: Max Length
+      - name: event_id
+        in: query
+        required: false
+        schema:
+          anyOf:
+            - type: string
+            - type: 'null'
+          title: Event Id
       - name: sort
        in: query
        required: false
@@ -1518,7 +1526,7 @@ paths:
          anyOf:
            - type: string
            - type: 'null'
-          default: thumbnail,description
+          default: thumbnail
          title: Search Type
       - name: include_thumbnails
        in: query
@@ -1590,6 +1598,22 @@ paths:
            - type: 'null'
          default: 00:00,24:00
          title: Time Range
+      - name: has_clip
+        in: query
+        required: false
+        schema:
+          anyOf:
+            - type: boolean
+            - type: 'null'
+          title: Has Clip
+      - name: has_snapshot
+        in: query
+        required: false
+        schema:
+          anyOf:
+            - type: boolean
+            - type: 'null'
+          title: Has Snapshot
       - name: timezone
        in: query
        required: false
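As a sketch of how the new query parameters compose (the localhost base URL is an assumption; per the spec above, `event_id` lands on `/events` while `has_clip`/`has_snapshot` land on the search endpoint, and the `query` parameter name is assumed):

```python
import requests  # assumes the requests package is available

BASE = "http://localhost:5000/api"  # assumed Frigate address

# Filter the events list down to a single tracked object by id.
events = requests.get(
    f"{BASE}/events", params={"event_id": "1607123955.475377-mxklsc"}, timeout=10
)
events.raise_for_status()

# The search endpoint gains boolean has_clip / has_snapshot filters.
search = requests.get(
    f"{BASE}/events/search", params={"query": "red car", "has_clip": True}, timeout=10
)
search.raise_for_status()
print(len(events.json()), len(search.json()))
```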
@@ -2356,14 +2380,14 @@ paths:
        required: false
        schema:
          type: number
-          default: 1729274204.653048
+          default: 1731275308.238304
          title: After
      - name: before
        in: query
        required: false
        schema:
          type: number
-          default: 1729277804.653095
+          default: 1731278908.238313
          title: Before
      responses:
        '200':
@@ -3262,6 +3286,27 @@ components:
      required:
        - subLabel
      title: EventsSubLabelBody
+    ExportRecordingsBody:
+      properties:
+        playback:
+          allOf:
+            - $ref: '#/components/schemas/PlaybackFactorEnum'
+          title: Playback factor
+          default: realtime
+        source:
+          allOf:
+            - $ref: '#/components/schemas/PlaybackSourceEnum'
+          title: Playback source
+          default: recordings
+        name:
+          type: string
+          maxLength: 256
+          title: Friendly name
+        image_path:
+          type: string
+          title: Image Path
+      type: object
+      title: ExportRecordingsBody
     Extension:
      type: string
      enum:
@@ -3313,6 +3358,18 @@ components:
        - total_alert
        - total_detection
      title: Last24HoursReview
+    PlaybackFactorEnum:
+      type: string
+      enum:
+        - realtime
+        - timelapse_25x
+      title: PlaybackFactorEnum
+    PlaybackSourceEnum:
+      type: string
+      enum:
+        - recordings
+        - preview
+      title: PlaybackSourceEnum
     RegenerateDescriptionEnum:
      type: string
      enum:
@@ -3336,7 +3393,7 @@ components:
        - motion
        - camera
      title: ReviewActivityMotionResponse
-    ReviewDeleteMultipleReviewsBody:
+    ReviewModifyMultipleBody:
      properties:
        ids:
          items:
@@ -3348,7 +3405,7 @@ components:
      type: object
      required:
        - ids
-      title: ReviewDeleteMultipleReviewsBody
+      title: ReviewModifyMultipleBody
     ReviewSegmentResponse:
      properties:
        id:
@@ -3386,19 +3443,6 @@ components:
        - thumb_path
        - data
      title: ReviewSegmentResponse
-    ReviewSetMultipleReviewedBody:
-      properties:
-        ids:
-          items:
-            type: string
-            minLength: 1
-          type: array
-          minItems: 1
-          title: Ids
-      type: object
-      required:
-        - ids
-      title: ReviewSetMultipleReviewedBody
     ReviewSummaryResponse:
      properties:
        last24Hours:
0  frigate/api/defs/__init__.py  (new file)
@@ -28,6 +28,7 @@ class EventsQueryParams(BaseModel):
     is_submitted: Optional[int] = None
     min_length: Optional[float] = None
     max_length: Optional[float] = None
+    event_id: Optional[str] = None
     sort: Optional[str] = None
     timezone: Optional[str] = "utc"
 
@@ -46,6 +47,7 @@ class EventsSearchQueryParams(BaseModel):
     time_range: Optional[str] = DEFAULT_TIME_RANGE
     has_clip: Optional[bool] = None
     has_snapshot: Optional[bool] = None
+    is_submitted: Optional[bool] = None
     timezone: Optional[str] = "utc"
     min_score: Optional[float] = None
     max_score: Optional[float] = None
0   frigate/api/defs/request/__init__.py  (new file)
20  frigate/api/defs/request/export_recordings_body.py  (new file)
@@ -0,0 +1,20 @@
+from typing import Union
+
+from pydantic import BaseModel, Field
+from pydantic.json_schema import SkipJsonSchema
+
+from frigate.record.export import (
+    PlaybackFactorEnum,
+    PlaybackSourceEnum,
+)
+
+
+class ExportRecordingsBody(BaseModel):
+    playback: PlaybackFactorEnum = Field(
+        default=PlaybackFactorEnum.realtime, title="Playback factor"
+    )
+    source: PlaybackSourceEnum = Field(
+        default=PlaybackSourceEnum.recordings, title="Playback source"
+    )
+    name: str = Field(title="Friendly name", default=None, max_length=256)
+    image_path: Union[str, SkipJsonSchema[None]] = None
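A quick illustration of how a pydantic body like this validates input, with standalone stand-ins for the enum (since `frigate.record.export` is not importable outside the app); names other than the field names are illustrative:

```python
from enum import Enum

from pydantic import BaseModel, Field, ValidationError


class PlaybackFactorEnum(str, Enum):  # stand-in mirroring the schema above
    realtime = "realtime"
    timelapse_25x = "timelapse_25x"


class ExportRecordingsBody(BaseModel):
    playback: PlaybackFactorEnum = Field(default=PlaybackFactorEnum.realtime)
    name: str = Field(default=None, max_length=256)


# Valid payloads parse straight into typed fields.
print(ExportRecordingsBody(playback="timelapse_25x", name="birdcam").playback)

# An over-long name now fails at the model layer, replacing the old
# hand-rolled length check in the endpoint.
try:
    ExportRecordingsBody(name="x" * 300)
except ValidationError as e:
    print("rejected:", e)
```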
@@ -88,6 +88,7 @@ def events(params: EventsQueryParams = Depends()):
     is_submitted = params.is_submitted
     min_length = params.min_length
     max_length = params.max_length
+    event_id = params.event_id
 
     sort = params.sort
 
@@ -230,6 +231,9 @@ def events(params: EventsQueryParams = Depends()):
     elif is_submitted > 0:
         clauses.append((Event.plus_id != ""))
 
+    if event_id is not None:
+        clauses.append((Event.id == event_id))
+
     if len(clauses) == 0:
         clauses.append((True))
 
@@ -356,6 +360,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     time_range = params.time_range
     has_clip = params.has_clip
     has_snapshot = params.has_snapshot
+    is_submitted = params.is_submitted
 
     # for similarity search
     event_id = params.event_id
@@ -437,6 +442,12 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
     if has_snapshot is not None:
         event_filters.append((Event.has_snapshot == has_snapshot))
 
+    if is_submitted is not None:
+        if is_submitted == 0:
+            event_filters.append((Event.plus_id.is_null()))
+        elif is_submitted > 0:
+            event_filters.append((Event.plus_id != ""))
+
     if min_score is not None and max_score is not None:
         event_filters.append((Event.data["score"].between(min_score, max_score)))
     else:
@@ -992,9 +1003,11 @@ def regenerate_description(
             status_code=404,
         )
 
+    camera_config = request.app.frigate_config.cameras[event.camera]
+
     if (
         request.app.frigate_config.semantic_search.enabled
-        and request.app.frigate_config.genai.enabled
+        and camera_config.genai.enabled
     ):
         request.app.event_metadata_updater.publish((event.id, params.source))
 
@@ -1038,9 +1051,6 @@ def delete_event(request: Request, event_id: str):
     media.unlink(missing_ok=True)
     media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
     media.unlink(missing_ok=True)
-    if event.has_clip:
-        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
-        media.unlink(missing_ok=True)
 
     event.delete_instance()
     Timeline.delete().where(Timeline.source_id == event_id).execute()
@@ -4,13 +4,13 @@ import logging
 import random
 import string
 from pathlib import Path
-from typing import Optional
 
 import psutil
 from fastapi import APIRouter, Request
 from fastapi.responses import JSONResponse
 from peewee import DoesNotExist
 
+from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
 from frigate.api.defs.tags import Tags
 from frigate.const import EXPORT_DIR
 from frigate.models import Export, Previews, Recordings
@@ -19,6 +19,7 @@ from frigate.record.export import (
     PlaybackSourceEnum,
     RecordingExporter,
 )
+from frigate.util.builtin import is_current_hour
 
 logger = logging.getLogger(__name__)
 
@@ -37,7 +38,7 @@ def export_recording(
     camera_name: str,
     start_time: float,
     end_time: float,
-    body: dict = None,
+    body: ExportRecordingsBody,
 ):
     if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
         return JSONResponse(
@@ -47,18 +48,10 @@ def export_recording(
             status_code=404,
         )
 
-    json: dict[str, any] = body or {}
-    playback_factor = json.get("playback", "realtime")
-    playback_source = json.get("source", "recordings")
-    friendly_name: Optional[str] = json.get("name")
-
-    if len(friendly_name or "") > 256:
-        return JSONResponse(
-            content=({"success": False, "message": "File name is too long."}),
-            status_code=401,
-        )
-
-    existing_image = json.get("image_path")
+    playback_factor = body.playback
+    playback_source = body.source
+    friendly_name = body.name
+    existing_image = body.image_path
 
     if playback_source == "recordings":
         recordings_count = (
@@ -94,7 +87,7 @@ def export_recording(
         .count()
     )
 
-    if previews_count <= 0:
+    if not is_current_hour(start_time) and previews_count <= 0:
         return JSONResponse(
             content=(
                 {"success": False, "message": "No previews found for time range"}
@@ -917,7 +917,7 @@ def grid_snapshot(
     ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
 
     return Response(
-        jpg.tobytes,
+        jpg.tobytes(),
         media_type="image/jpeg",
         headers={"Cache-Control": "no-store"},
     )
@@ -1453,7 +1453,6 @@ def preview_thumbnail(file_name: str):
 
     return Response(
         jpg_bytes,
-        # FIXME: Shouldn't it be either jpg or webp depending on the endpoint?
         media_type="image/webp",
         headers={
             "Content-Type": "image/webp",
@@ -1482,7 +1481,7 @@ def label_thumbnail(request: Request, camera_name: str, label: str):
     ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
 
     return Response(
-        jpg.tobytes,
+        jpg.tobytes(),
         media_type="image/jpeg",
         headers={"Cache-Control": "no-store"},
     )
@@ -1535,6 +1534,6 @@ def label_snapshot(request: Request, camera_name: str, label: str):
     _, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
 
     return Response(
-        jpg.tobytes,
+        jpg.tobytes(),
         media_type="image/jpeg",
     )
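The `jpg.tobytes` → `jpg.tobytes()` fixes above matter because `cv2.imencode` returns a numpy array, and referencing the method without calling it hands the response a bound-method object instead of bytes. A minimal demonstration:

```python
import cv2
import numpy as np

frame = np.zeros((4, 4, 3), dtype=np.uint8)  # tiny black image
ok, jpg = cv2.imencode(".jpg", frame)
assert ok

print(type(jpg.tobytes))    # a builtin method object -- the bug
print(type(jpg.tobytes()))  # <class 'bytes'> -- what Response actually needs
```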
@@ -63,12 +63,12 @@ from frigate.record.cleanup import RecordingCleanup
 from frigate.record.export import migrate_exports
 from frigate.record.record import manage_recordings
 from frigate.review.review import manage_review_segments
-from frigate.service_manager import ServiceManager
 from frigate.stats.emitter import StatsEmitter
 from frigate.stats.util import stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
 from frigate.util.builtin import empty_and_close_queue
+from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
 from frigate.util.object import get_camera_regions_grid
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
@@ -79,6 +79,7 @@ logger = logging.getLogger(__name__)
 
 class FrigateApp:
     def __init__(self, config: FrigateConfig) -> None:
+        self.audio_process: Optional[mp.Process] = None
         self.stop_event: MpEvent = mp.Event()
         self.detection_queue: Queue = mp.Queue()
         self.detectors: dict[str, ObjectDetectProcess] = {}
@@ -90,6 +91,7 @@ class FrigateApp:
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
         self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
+        self.frame_manager = SharedMemoryFrameManager()
         self.config = config
 
     def ensure_dirs(self) -> None:
@@ -325,20 +327,20 @@ class FrigateApp:
                     for det in self.config.detectors.values()
                 ]
             )
-            shm_in = mp.shared_memory.SharedMemory(
+            shm_in = UntrackedSharedMemory(
                 name=name,
                 create=True,
                 size=largest_frame,
             )
         except FileExistsError:
-            shm_in = mp.shared_memory.SharedMemory(name=name)
+            shm_in = UntrackedSharedMemory(name=name)
 
         try:
-            shm_out = mp.shared_memory.SharedMemory(
+            shm_out = UntrackedSharedMemory(
                 name=f"out-{name}", create=True, size=20 * 6 * 4
             )
         except FileExistsError:
-            shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
+            shm_out = UntrackedSharedMemory(name=f"out-{name}")
 
         self.detection_shms.append(shm_in)
         self.detection_shms.append(shm_out)
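Context for the `UntrackedSharedMemory` swap, as a sketch: with the stdlib class, every process that attaches a `SharedMemory` registers it with Python's resource tracker, which can unlink segments (and log warnings) when a short-lived process exits; an "untracked" wrapper presumably opts out of that registration. The create-or-attach pattern itself is plain stdlib:

```python
from multiprocessing import shared_memory

# Create the segment if it does not exist yet, otherwise attach to it --
# the same pattern used in the diff above. The name is illustrative.
try:
    shm = shared_memory.SharedMemory(name="frigate-demo", create=True, size=1024)
except FileExistsError:
    shm = shared_memory.SharedMemory(name="frigate-demo")

shm.buf[0] = 42  # shared byte visible to any process attached by name
shm.close()
shm.unlink()  # remove the segment once the last user is done
```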
@@ -431,6 +433,11 @@ class FrigateApp:
                 logger.info(f"Capture process not started for disabled camera {name}")
                 continue
 
+            # pre-create shms
+            for i in range(shm_frame_count):
+                frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
+                self.frame_manager.create(f"{config.name}{i}", frame_size)
+
             capture_process = util.Process(
                 target=capture_camera,
                 name=f"camera_capture:{name}",
@@ -449,8 +456,9 @@ class FrigateApp:
         ]
 
         if audio_cameras:
-            proc = AudioProcessor(audio_cameras, self.camera_metrics).start(wait=True)
-            self.processes["audio_detector"] = proc.pid or 0
+            self.audio_process = AudioProcessor(audio_cameras, self.camera_metrics)
+            self.audio_process.start()
+            self.processes["audio_detector"] = self.audio_process.pid or 0
 
     def start_timeline_processor(self) -> None:
         self.timeline_processor = TimelineProcessor(
@@ -512,15 +520,18 @@ class FrigateApp:
             1,
         )
 
-        shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))
+        if cam_total_frame_size == 0.0:
+            return 0
+
+        shm_frame_count = min(200, int(available_shm / (cam_total_frame_size)))
 
         logger.debug(
             f"Calculated total camera size {available_shm} / {cam_total_frame_size} :: {shm_frame_count} frames for each camera in SHM"
         )
 
-        if shm_frame_count < 10:
+        if shm_frame_count < 20:
             logger.warning(
-                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 10)}MB."
+                f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size * 20)}MB."
             )
 
         return shm_frame_count
@@ -638,6 +649,11 @@ class FrigateApp:
             ReviewSegment.end_time == None
         ).execute()
 
+        # stop the audio process
+        if self.audio_process:
+            self.audio_process.terminate()
+            self.audio_process.join()
+
         # ensure the capture processes are done
         for camera, metrics in self.camera_metrics.items():
             capture_process = metrics.capture_process
@@ -701,11 +717,10 @@ class FrigateApp:
         self.event_metadata_updater.stop()
         self.inter_zmq_proxy.stop()
 
+        self.frame_manager.cleanup()
         while len(self.detection_shms) > 0:
             shm = self.detection_shms.pop()
             shm.close()
             shm.unlink()
 
-        ServiceManager.current().shutdown(wait=True)
-
         os._exit(os.EX_OK)
@@ -22,7 +22,7 @@ from frigate.const import (
 )
 from frigate.models import Event, Previews, Recordings, ReviewSegment
 from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
-from frigate.types import ModelStatusTypesEnum
+from frigate.types import ModelStatusTypesEnum, TrackedObjectUpdateTypesEnum
 from frigate.util.object import get_camera_regions_grid
 from frigate.util.services import restart_frigate
 
@@ -137,8 +137,14 @@ class Dispatcher:
             event.data["description"] = payload["description"]
             event.save()
             self.publish(
-                "event_update",
-                json.dumps({"id": event.id, "description": event.data["description"]}),
+                "tracked_object_update",
+                json.dumps(
+                    {
+                        "type": TrackedObjectUpdateTypesEnum.description,
+                        "id": event.id,
+                        "description": event.data["description"],
+                    }
+                ),
             )
 
         def handle_update_model_state():
@@ -14,7 +14,7 @@ class EventUpdatePublisher(Publisher):
         super().__init__("update")
 
     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, any]]
+        self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, any]]
     ) -> None:
         super().publish(payload)
 
@@ -133,7 +133,7 @@ class MqttClient(Communicator): # type: ignore[misc]
         """Mqtt connection callback."""
         threading.current_thread().name = "mqtt"
         if reason_code != 0:
-            if reason_code == "Server Unavailable":
+            if reason_code == "Server unavailable":
                 logger.error(
                     "Unable to connect to MQTT server: MQTT Server unavailable"
                 )
@@ -13,7 +13,7 @@ class AuthConfig(FrigateBaseModel):
         default=False, title="Reset the admin password on startup"
     )
     cookie_name: str = Field(
-        default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z]_*$"
+        default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
     )
     cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
     session_length: int = Field(
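The cookie-name pattern fix above is easy to verify: the old regex `^[a-z]_*$` only ever matched a single lowercase letter followed by underscores, so even the default `frigate_token` failed validation, while `^[a-z_]+$` accepts any lowercase/underscore name. A quick check:

```python
import re

old, new = re.compile(r"^[a-z]_*$"), re.compile(r"^[a-z_]+$")

print(bool(old.match("frigate_token")))  # False -- default value rejected by old pattern
print(bool(old.match("a___")))           # True  -- about the only shape it accepted
print(bool(new.match("frigate_token")))  # True  -- fixed pattern
```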
@@ -94,3 +94,10 @@ class RecordConfig(FrigateBaseModel):
     enabled_in_config: Optional[bool] = Field(
         default=None, title="Keep track of original state of recording."
     )
+
+    @property
+    def event_pre_capture(self) -> int:
+        return max(
+            self.alerts.pre_capture,
+            self.detections.pre_capture,
+        )
@@ -67,7 +67,7 @@ logger = logging.getLogger(__name__)
 
 yaml = YAML()
 
-DEFAULT_CONFIG_FILES = ["/config/config.yaml", "/config/config.yml"]
+DEFAULT_CONFIG_FILE = "/config/config.yml"
 DEFAULT_CONFIG = """
 mqtt:
   enabled: False
@@ -634,27 +634,23 @@ class FrigateConfig(FrigateBaseModel):
 
     @classmethod
     def load(cls, **kwargs):
-        config_path = os.environ.get("CONFIG_FILE")
+        config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
 
-        # No explicit configuration file, try to find one in the default paths.
-        if config_path is None:
-            for path in DEFAULT_CONFIG_FILES:
-                if os.path.isfile(path):
-                    config_path = path
-                    break
+        if not os.path.isfile(config_path):
+            config_path = config_path.replace("yml", "yaml")
 
         # No configuration file found, create one.
         new_config = False
-        if config_path is None:
+        if not os.path.isfile(config_path):
             logger.info("No config file found, saving default config")
-            config_path = DEFAULT_CONFIG_FILES[-1]
+            config_path = DEFAULT_CONFIG_FILE
             new_config = True
         else:
             # Check if the config file needs to be migrated.
             migrate_frigate_config(config_path)
 
         # Finally, load the resulting configuration file.
-        with open(config_path, "a+") as f:
+        with open(config_path, "a+" if new_config else "r") as f:
             # Only write the default config if the opened file is empty. This can happen as
             # a race condition. It's extremely unlikely, but eh. Might as well check it.
             if new_config and f.tell() == 0:
@@ -23,7 +23,7 @@ EnvString = Annotated[str, AfterValidator(validate_env_string)]
 
 def validate_env_vars(v: dict[str, str], info: ValidationInfo) -> dict[str, str]:
     if isinstance(info.context, dict) and info.context.get("install", False):
-        for k, v in v:
+        for k, v in v.items():
             os.environ[k] = v
 
     return v
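The `for k, v in v:` fix above is the classic dict-iteration pitfall: iterating a dict yields only its keys, so tuple-unpacking each key either raises or silently mangles two-character keys. Illustrated:

```python
env = {"TZ": "UTC", "PLUS_API_KEY": "secret"}

try:
    for k, v in env:  # iterates keys; "TZ" unpacks to ('T', 'Z'), the next key raises
        pass
except ValueError as e:
    print(e)  # too many values to unpack (expected 2)

for k, v in env.items():  # correct: yields (key, value) pairs
    print(k, v)
```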
@@ -27,6 +27,11 @@ class InputTensorEnum(str, Enum):
     nhwc = "nhwc"
 
 
+class InputDTypeEnum(str, Enum):
+    float = "float"
+    int = "int"
+
+
 class ModelTypeEnum(str, Enum):
     ssd = "ssd"
     yolox = "yolox"
@@ -53,6 +58,9 @@ class ModelConfig(BaseModel):
     input_pixel_format: PixelFormatEnum = Field(
         default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
     )
+    input_dtype: InputDTypeEnum = Field(
+        default=InputDTypeEnum.int, title="Model Input D Type"
+    )
     model_type: ModelTypeEnum = Field(
         default=ModelTypeEnum.ssd, title="Object Detection Model Type"
     )
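A sketch of what a detector might do with the new `input_dtype` option (the 0-1 normalization convention shown is an assumption for illustration; Frigate's detectors may scale differently):

```python
import numpy as np


def prepare_tensor(frame: np.ndarray, input_dtype: str) -> np.ndarray:
    """Convert a uint8 HWC frame to the dtype a model expects."""
    if input_dtype == "float":
        return frame.astype(np.float32) / 255.0  # assumed 0-1 scaling
    return frame  # quantized models consume the raw uint8/int tensor


frame = np.random.randint(0, 256, (320, 320, 3), dtype=np.uint8)
print(prepare_tensor(frame, "float").dtype)  # float32
```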
@@ -54,7 +54,7 @@ class ONNXDetector(DetectionApi):
|
|||||||
|
|
||||||
logger.info(f"ONNX: {path} loaded")
|
logger.info(f"ONNX: {path} loaded")
|
||||||
|
|
||||||
def detect_raw(self, tensor_input):
|
def detect_raw(self, tensor_input: np.ndarray):
|
||||||
model_input_name = self.model.get_inputs()[0].name
|
model_input_name = self.model.get_inputs()[0].name
|
||||||
tensor_output = self.model.run(None, {model_input_name: tensor_input})
|
tensor_output = self.model.run(None, {model_input_name: tensor_input})
|
||||||
|
|
||||||
|
|||||||
@@ -98,9 +98,7 @@ class ROCmDetector(DetectionApi):
|
|||||||
else:
|
else:
|
||||||
logger.info(f"AMD/ROCm: loading model from {path}")
|
logger.info(f"AMD/ROCm: loading model from {path}")
|
||||||
|
|
||||||
if path.endswith(".onnx"):
|
if (
|
||||||
self.model = migraphx.parse_onnx(path)
|
|
||||||
elif (
|
|
||||||
path.endswith(".tf")
|
path.endswith(".tf")
|
||||||
or path.endswith(".tf2")
|
or path.endswith(".tf2")
|
||||||
or path.endswith(".tflite")
|
or path.endswith(".tflite")
|
||||||
@@ -108,7 +106,7 @@ class ROCmDetector(DetectionApi):
|
|||||||
# untested
|
# untested
|
||||||
self.model = migraphx.parse_tf(path)
|
self.model = migraphx.parse_tf(path)
|
||||||
else:
|
else:
|
||||||
raise Exception(f"AMD/ROCm: unknown model format {path}")
|
self.model = migraphx.parse_onnx(path)
|
||||||
|
|
||||||
logger.info("AMD/ROCm: compiling the model")
|
logger.info("AMD/ROCm: compiling the model")
|
||||||
|
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
|
|||||||
from frigate.events.types import EventTypeEnum
|
from frigate.events.types import EventTypeEnum
|
||||||
from frigate.genai import get_genai_client
|
from frigate.genai import get_genai_client
|
||||||
from frigate.models import Event
|
from frigate.models import Event
|
||||||
|
from frigate.types import TrackedObjectUpdateTypesEnum
|
||||||
from frigate.util.builtin import serialize
|
from frigate.util.builtin import serialize
|
||||||
from frigate.util.image import SharedMemoryFrameManager, calculate_region
|
from frigate.util.image import SharedMemoryFrameManager, calculate_region
|
||||||
|
|
||||||
@@ -62,7 +63,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
self.requestor = InterProcessRequestor()
|
self.requestor = InterProcessRequestor()
|
||||||
self.stop_event = stop_event
|
self.stop_event = stop_event
|
||||||
self.tracked_events = {}
|
self.tracked_events = {}
|
||||||
self.genai_client = get_genai_client(config.genai)
|
self.genai_client = get_genai_client(config)
|
||||||
|
|
||||||
def run(self) -> None:
|
def run(self) -> None:
|
||||||
"""Maintain a SQLite-vec database for semantic search."""
|
"""Maintain a SQLite-vec database for semantic search."""
|
||||||
@@ -113,7 +114,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
if update is None:
|
if update is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
source_type, _, camera, data = update
|
source_type, _, camera, frame_name, data = update
|
||||||
|
|
||||||
if not camera or source_type != EventTypeEnum.tracked_object:
|
if not camera or source_type != EventTypeEnum.tracked_object:
|
||||||
return
|
return
|
||||||
@@ -133,8 +134,9 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
|
|
||||||
# Create our own thumbnail based on the bounding box and the frame time
|
# Create our own thumbnail based on the bounding box and the frame time
|
||||||
try:
|
try:
|
||||||
frame_id = f"{camera}{data['frame_time']}"
|
yuv_frame = self.frame_manager.get(
|
||||||
yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
|
frame_name, camera_config.frame_shape_yuv
|
||||||
|
)
|
||||||
|
|
||||||
if yuv_frame is not None:
|
if yuv_frame is not None:
|
||||||
data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
|
data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
|
||||||
@@ -146,7 +148,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
|
|
||||||
self.tracked_events[data["id"]].append(data)
|
self.tracked_events[data["id"]].append(data)
|
||||||
|
|
||||||
self.frame_manager.close(frame_id)
|
self.frame_manager.close(frame_name)
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -287,7 +289,11 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
# fire and forget description update
|
# fire and forget description update
|
||||||
self.requestor.send_data(
|
self.requestor.send_data(
|
||||||
UPDATE_EVENT_DESCRIPTION,
|
UPDATE_EVENT_DESCRIPTION,
|
||||||
{"id": event.id, "description": description},
|
{
|
||||||
|
"type": TrackedObjectUpdateTypesEnum.description,
|
||||||
|
"id": event.id,
|
||||||
|
"description": description,
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
# Embed the description
|
# Embed the description
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ from typing import Tuple
 import numpy as np
 import requests
 
+import frigate.util as util
 from frigate.camera import CameraMetrics
 from frigate.comms.config_updater import ConfigSubscriber
 from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
@@ -25,7 +26,6 @@ from frigate.const import (
 from frigate.ffmpeg_presets import parse_preset_input
 from frigate.log import LogPipe
 from frigate.object_detection import load_labels
-from frigate.service_manager import ServiceProcess
 from frigate.util.builtin import get_ffmpeg_arg_list
 from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
 
@@ -63,7 +63,7 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
     )
 
 
-class AudioProcessor(ServiceProcess):
+class AudioProcessor(util.Process):
     name = "frigate.audio_manager"
 
     def __init__(
@@ -71,7 +71,7 @@ class AudioProcessor(ServiceProcess):
         cameras: list[CameraConfig],
         camera_metrics: dict[str, CameraMetrics],
     ):
-        super().__init__()
+        super().__init__(name="frigate.audio_manager", daemon=True)
 
         self.camera_metrics = camera_metrics
         self.cameras = cameras
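For context, `super().__init__(name=..., daemon=True)` above is the standard `multiprocessing.Process` constructor signature. A self-contained sketch of the same pattern, with plain `multiprocessing.Process` standing in for frigate's `util.Process` wrapper:

import multiprocessing as mp


class AudioProcessorSketch(mp.Process):
    def __init__(self) -> None:
        # name shows up in logs and process listings; daemon processes
        # are terminated automatically when the parent exits
        super().__init__(name="frigate.audio_manager", daemon=True)

    def run(self) -> None:
        print("audio processing loop would run here")


if __name__ == "__main__":
    proc = AudioProcessorSketch()
    proc.start()
    proc.join()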
@@ -21,6 +21,9 @@ class EventCleanupType(str, Enum):
     snapshots = "snapshots"
 
 
+CHUNK_SIZE = 50
+
+
 class EventCleanup(threading.Thread):
     def __init__(
         self, config: FrigateConfig, stop_event: MpEvent, db: SqliteVecQueueDatabase
@@ -107,6 +110,7 @@ class EventCleanup(threading.Thread):
                 .namedtuples()
                 .iterator()
             )
+            logger.debug(f"{len(expired_events)} events can be expired")
             # delete the media from disk
             for expired in expired_events:
                 media_name = f"{expired.camera}-{expired.id}"
@@ -125,13 +129,34 @@ class EventCleanup(threading.Thread):
                     logger.warning(f"Unable to delete event images: {e}")
 
            # update the clips attribute for the db entry
-            update_query = Event.update(update_params).where(
+            query = Event.select(Event.id).where(
                 Event.camera.not_in(self.camera_keys),
                 Event.start_time < expire_after,
                 Event.label == event.label,
                 Event.retain_indefinitely == False,
             )
-            update_query.execute()
+
+            events_to_update = []
+
+            for batch in query.iterator():
+                events_to_update.extend([event.id for event in batch])
+                if len(events_to_update) >= CHUNK_SIZE:
+                    logger.debug(
+                        f"Updating {update_params} for {len(events_to_update)} events"
+                    )
+                    Event.update(update_params).where(
+                        Event.id << events_to_update
+                    ).execute()
+                    events_to_update = []
+
+            # Update any remaining events
+            if events_to_update:
+                logger.debug(
+                    f"Updating clips/snapshots attribute for {len(events_to_update)} events"
+                )
+                Event.update(update_params).where(
+                    Event.id << events_to_update
+                ).execute()
 
         events_to_update = []
 
@@ -196,7 +221,11 @@ class EventCleanup(threading.Thread):
                 logger.warning(f"Unable to delete event images: {e}")
 
         # update the clips attribute for the db entry
-        Event.update(update_params).where(Event.id << events_to_update).execute()
+        for i in range(0, len(events_to_update), CHUNK_SIZE):
+            batch = events_to_update[i : i + CHUNK_SIZE]
+            logger.debug(f"Updating {update_params} for {len(batch)} events")
+            Event.update(update_params).where(Event.id << batch).execute()
 
         return events_to_update
 
     def run(self) -> None:
@@ -222,10 +251,11 @@ class EventCleanup(threading.Thread):
             .iterator()
         )
         events_to_delete = [e.id for e in events]
+        logger.debug(f"Found {len(events_to_delete)} events that can be expired")
         if len(events_to_delete) > 0:
-            chunk_size = 50
-            for i in range(0, len(events_to_delete), chunk_size):
-                chunk = events_to_delete[i : i + chunk_size]
+            for i in range(0, len(events_to_delete), CHUNK_SIZE):
+                chunk = events_to_delete[i : i + CHUNK_SIZE]
+                logger.debug(f"Deleting {len(chunk)} events from the database")
                 Event.delete().where(Event.id << chunk).execute()
 
         if self.config.semantic_search.enabled:
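The cleanup hunks replace single unbounded UPDATE/DELETE statements with batches of `CHUNK_SIZE` ids, which keeps each SQLite statement and its `IN (...)` list small. A minimal sketch of that pattern with a generic Peewee model; the model itself is a stand-in:

from peewee import BooleanField, CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Item(Model):
    name = CharField()
    flagged = BooleanField(default=False)

    class Meta:
        database = db


CHUNK_SIZE = 50


def flag_in_chunks(ids: list[int]) -> None:
    # one bounded UPDATE per chunk; `<<` is Peewee's IN operator
    for i in range(0, len(ids), CHUNK_SIZE):
        batch = ids[i : i + CHUNK_SIZE]
        Item.update(flagged=True).where(Item.id << batch).execute()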
@@ -70,7 +70,7 @@ class ExternalEventProcessor:
             "sub_label": sub_label,
             "score": score,
             "camera": camera,
-            "start_time": now,
+            "start_time": now - camera_config.record.event_pre_capture,
             "end_time": end,
             "thumbnail": thumbnail,
             "has_clip": camera_config.record.enabled and include_recording,
@@ -75,7 +75,7 @@ class EventProcessor(threading.Thread):
             if update == None:
                 continue
 
-            source_type, event_type, camera, event_data = update
+            source_type, event_type, camera, _, event_data = update
 
             logger.debug(
                 f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
@@ -1,14 +1,17 @@
 """Generative AI module for Frigate."""
 
 import importlib
+import logging
 import os
 from typing import Optional
 
 from playhouse.shortcuts import model_to_dict
 
-from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
+from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum
 from frigate.models import Event
 
+logger = logging.getLogger(__name__)
+
 PROVIDERS = {}
 
 
@@ -41,6 +44,7 @@ class GenAIClient:
             event.label,
             camera_config.genai.prompt,
         ).format(**model_to_dict(event))
+        logger.debug(f"Sending images to genai provider with prompt: {prompt}")
         return self._send(prompt, thumbnails)
 
     def _init_provider(self):
@@ -52,13 +56,19 @@ class GenAIClient:
         return None
 
 
-def get_genai_client(genai_config: GenAIConfig) -> Optional[GenAIClient]:
+def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
     """Get the GenAI client."""
-    if genai_config.enabled:
+    genai_config = config.genai
+    genai_cameras = [
+        c for c in config.cameras.values() if c.enabled and c.genai.enabled
+    ]
+
+    if genai_cameras:
         load_providers()
         provider = PROVIDERS.get(genai_config.provider)
         if provider:
             return provider(genai_config)
 
     return None
@@ -12,10 +12,14 @@ from setproctitle import setproctitle
 
 import frigate.util as util
 from frigate.detectors import create_detector
-from frigate.detectors.detector_config import BaseDetectorConfig, InputTensorEnum
+from frigate.detectors.detector_config import (
+    BaseDetectorConfig,
+    InputDTypeEnum,
+    InputTensorEnum,
+)
 from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
 from frigate.util.builtin import EventsPerSecond, load_labels
-from frigate.util.image import SharedMemoryFrameManager
+from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
 from frigate.util.services import listen
 
 logger = logging.getLogger(__name__)
@@ -55,12 +59,15 @@ class LocalObjectDetector(ObjectDetector):
             self.input_transform = tensor_transform(
                 detector_config.model.input_tensor
             )
+
+            self.dtype = detector_config.model.input_dtype
         else:
             self.input_transform = None
+            self.dtype = InputDTypeEnum.int
 
         self.detect_api = create_detector(detector_config)
 
-    def detect(self, tensor_input, threshold=0.4):
+    def detect(self, tensor_input: np.ndarray, threshold=0.4):
         detections = []
 
         raw_detections = self.detect_raw(tensor_input)
@@ -77,9 +84,14 @@ class LocalObjectDetector(ObjectDetector):
         self.fps.update()
         return detections
 
-    def detect_raw(self, tensor_input):
+    def detect_raw(self, tensor_input: np.ndarray):
         if self.input_transform:
             tensor_input = np.transpose(tensor_input, self.input_transform)
+
+        if self.dtype == InputDTypeEnum.float:
+            tensor_input = tensor_input.astype(np.float32)
+            tensor_input /= 255
+
         return self.detect_api.detect_raw(tensor_input=tensor_input)
 
 
@@ -110,7 +122,7 @@ def run_detector(
 
     outputs = {}
     for name in out_events.keys():
-        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
+        out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
         out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
         outputs[name] = {"shm": out_shm, "np": out_np}
 
@@ -200,15 +212,13 @@ class RemoteObjectDetector:
         self.detection_queue = detection_queue
         self.event = event
         self.stop_event = stop_event
-        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
+        self.shm = UntrackedSharedMemory(name=self.name, create=False)
         self.np_shm = np.ndarray(
             (1, model_config.height, model_config.width, 3),
             dtype=np.uint8,
             buffer=self.shm.buf,
         )
-        self.out_shm = mp.shared_memory.SharedMemory(
-            name=f"out-{self.name}", create=False
-        )
+        self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False)
         self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
 
     def detect(self, tensor_input, threshold=0.4):
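The `detect_raw` change above normalizes inputs for float-input models while leaving quantized models on the raw uint8 tensor. A standalone sketch of that conversion:

import numpy as np


def prepare_input(tensor: np.ndarray, wants_float: bool) -> np.ndarray:
    """Scale a uint8 image tensor to [0, 1] float32 when the model needs it."""
    if wants_float:
        tensor = tensor.astype(np.float32)  # astype copies, so /= is safe
        tensor /= 255
    return tensor


frame = np.random.randint(0, 255, (1, 320, 320, 3), dtype=np.uint8)
assert prepare_input(frame, True).dtype == np.float32
assert prepare_input(frame, False).dtype == np.uint8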
@@ -233,17 +233,18 @@ class CameraState:
     def on(self, event_type: str, callback: Callable[[dict], None]):
         self.callbacks[event_type].append(callback)
 
-    def update(self, frame_time, current_detections, motion_boxes, regions):
-        # get the new frame
-        frame_id = f"{self.name}{frame_time}"
+    def update(
+        self,
+        frame_name: str,
+        frame_time: float,
+        current_detections: dict[str, dict[str, any]],
+        motion_boxes: list[tuple[int, int, int, int]],
+        regions: list[tuple[int, int, int, int]],
+    ):
         current_frame = self.frame_manager.get(
-            frame_id, self.camera_config.frame_shape_yuv
+            frame_name, self.camera_config.frame_shape_yuv
         )
 
-        if current_frame is None:
-            logger.debug(f"Failed to get frame {frame_id} from SHM")
-
         tracked_objects = self.tracked_objects.copy()
         current_ids = set(current_detections.keys())
         previous_ids = set(tracked_objects.keys())
@@ -261,7 +262,7 @@ class CameraState:
 
             # call event handlers
             for c in self.callbacks["start"]:
-                c(self.name, new_obj, frame_time)
+                c(self.name, new_obj, frame_name)
 
         for id in updated_ids:
             updated_obj = tracked_objects[id]
@@ -271,7 +272,7 @@ class CameraState:
 
             if autotracker_update or significant_update:
                 for c in self.callbacks["autotrack"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
 
             if thumb_update and current_frame is not None:
                 # ensure this frame is stored in the cache
@@ -292,7 +293,7 @@ class CameraState:
             ) or significant_update:
                 # call event handlers
                 for c in self.callbacks["update"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
                 updated_obj.last_published = frame_time
 
         for id in removed_ids:
@@ -301,7 +302,7 @@ class CameraState:
             if "end_time" not in removed_obj.obj_data:
                 removed_obj.obj_data["end_time"] = frame_time
                 for c in self.callbacks["end"]:
-                    c(self.name, removed_obj, frame_time)
+                    c(self.name, removed_obj, frame_name)
 
         # TODO: can i switch to looking this up and only changing when an event ends?
         # maintain best objects
@@ -367,11 +368,11 @@ class CameraState:
             ):
                 self.best_objects[object_type] = obj
                 for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[object_type], frame_time)
+                    c(self.name, self.best_objects[object_type], frame_name)
             else:
                 self.best_objects[object_type] = obj
                 for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[object_type], frame_time)
+                    c(self.name, self.best_objects[object_type], frame_name)
 
         for c in self.callbacks["camera_activity"]:
             c(self.name, camera_activity)
@@ -446,7 +447,7 @@ class CameraState:
                 c(self.name, obj_name, 0)
                 self.active_object_counts[obj_name] = 0
                 for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[obj_name], frame_time)
+                    c(self.name, self.best_objects[obj_name], frame_name)
 
         # cleanup thumbnail frame cache
         current_thumb_frames = {
@@ -477,7 +478,7 @@ class CameraState:
         if self.previous_frame_id is not None:
             self.frame_manager.close(self.previous_frame_id)
 
-        self.previous_frame_id = frame_id
+        self.previous_frame_id = frame_name
 
 
 class TrackedObjectProcessor(threading.Thread):
@@ -517,17 +518,18 @@ class TrackedObjectProcessor(threading.Thread):
         self.zone_data = defaultdict(lambda: defaultdict(dict))
         self.active_zone_data = defaultdict(lambda: defaultdict(dict))
 
-        def start(camera, obj: TrackedObject, current_frame_time):
+        def start(camera: str, obj: TrackedObject, frame_name: str):
             self.event_sender.publish(
                 (
                     EventTypeEnum.tracked_object,
                     EventStateEnum.start,
                     camera,
+                    frame_name,
                     obj.to_dict(),
                 )
             )
 
-        def update(camera, obj: TrackedObject, current_frame_time):
+        def update(camera: str, obj: TrackedObject, frame_name: str):
             obj.has_snapshot = self.should_save_snapshot(camera, obj)
             obj.has_clip = self.should_retain_recording(camera, obj)
             after = obj.to_dict()
@@ -543,14 +545,15 @@ class TrackedObjectProcessor(threading.Thread):
                     EventTypeEnum.tracked_object,
                     EventStateEnum.update,
                     camera,
+                    frame_name,
                     obj.to_dict(include_thumbnail=True),
                 )
             )
 
-        def autotrack(camera, obj: TrackedObject, current_frame_time):
+        def autotrack(camera: str, obj: TrackedObject, frame_name: str):
             self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
 
-        def end(camera, obj: TrackedObject, current_frame_time):
+        def end(camera: str, obj: TrackedObject, frame_name: str):
             # populate has_snapshot
             obj.has_snapshot = self.should_save_snapshot(camera, obj)
             obj.has_clip = self.should_retain_recording(camera, obj)
@@ -605,11 +608,12 @@ class TrackedObjectProcessor(threading.Thread):
                     EventTypeEnum.tracked_object,
                     EventStateEnum.end,
                     camera,
+                    frame_name,
                     obj.to_dict(include_thumbnail=True),
                 )
             )
 
-        def snapshot(camera, obj: TrackedObject, current_frame_time):
+        def snapshot(camera, obj: TrackedObject, frame_name: str):
             mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
             if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
                 jpg_bytes = obj.get_jpg_bytes(
@@ -714,7 +718,8 @@ class TrackedObjectProcessor(threading.Thread):
             )
             and (
                 not review_config.detections.required_zones
-                or set(obj.entered_zones) & set(review_config.alerts.required_zones)
+                or set(obj.entered_zones)
+                & set(review_config.detections.required_zones)
             )
         )
     ):
@@ -797,6 +802,7 @@ class TrackedObjectProcessor(threading.Thread):
             try:
                 (
                     camera,
+                    frame_name,
                     frame_time,
                     current_tracked_objects,
                     motion_boxes,
@@ -808,7 +814,7 @@ class TrackedObjectProcessor(threading.Thread):
             camera_state = self.camera_states[camera]
 
             camera_state.update(
-                frame_time, current_tracked_objects, motion_boxes, regions
+                frame_name, frame_time, current_tracked_objects, motion_boxes, regions
             )
 
             self.update_mqtt_motion(camera, frame_time, motion_boxes)
@@ -821,6 +827,7 @@ class TrackedObjectProcessor(threading.Thread):
             self.detection_publisher.publish(
                 (
                     camera,
+                    frame_name,
                     frame_time,
                     tracked_objects,
                     motion_boxes,
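Throughout these hunks, callbacks receive `frame_name` (the shared-memory segment name) instead of reconstructing an id from `f"{camera}{frame_time}"`. A sketch of name-based frame access using the stdlib primitives that Frigate's frame manager wraps; the shape handling is illustrative:

import numpy as np
from multiprocessing import shared_memory


def read_yuv_frame(frame_name: str, frame_shape_yuv: tuple[int, int]) -> np.ndarray:
    """Attach to a named SHM block and return an owned copy of the frame."""
    shm = shared_memory.SharedMemory(name=frame_name, create=False)
    try:
        # copy out so the array does not outlive the mapping
        return np.ndarray(frame_shape_yuv, dtype=np.uint8, buffer=shm.buf).copy()
    finally:
        shm.close()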
@@ -268,12 +268,10 @@ class BirdsEyeFrameManager:
     def __init__(
         self,
         config: FrigateConfig,
-        frame_manager: SharedMemoryFrameManager,
         stop_event: mp.Event,
     ):
         self.config = config
         self.mode = config.birdseye.mode
-        self.frame_manager = frame_manager
         width, height = get_canvas_shape(config.birdseye.width, config.birdseye.height)
         self.frame_shape = (height, width)
         self.yuv_shape = (height * 3 // 2, width)
@@ -351,18 +349,13 @@ class BirdsEyeFrameManager:
         logger.debug("Clearing the birdseye frame")
         self.frame[:] = self.blank_frame
 
-    def copy_to_position(self, position, camera=None, frame_time=None):
+    def copy_to_position(self, position, camera=None, frame: np.ndarray = None):
         if camera is None:
             frame = None
             channel_dims = None
         else:
-            frame_id = f"{camera}{frame_time}"
-            frame = self.frame_manager.get(
-                frame_id, self.config.cameras[camera].frame_shape_yuv
-            )
-
             if frame is None:
-                logger.debug(f"Unable to copy frame {camera}{frame_time} to birdseye.")
+                logger.debug(f"Unable to copy frame {camera} to birdseye.")
                 return
 
             channel_dims = self.cameras[camera]["channel_dims"]
@@ -375,8 +368,6 @@ class BirdsEyeFrameManager:
             channel_dims,
         )
 
-        self.frame_manager.close(frame_id)
-
     def camera_active(self, mode, object_box_count, motion_box_count):
         if mode == BirdseyeModeEnum.continuous:
             return True
@@ -387,7 +378,7 @@ class BirdsEyeFrameManager:
         if mode == BirdseyeModeEnum.objects and object_box_count > 0:
             return True
 
-    def update_frame(self):
+    def update_frame(self, frame: np.ndarray):
         """Update to a new frame for birdseye."""
 
         # determine how many cameras are tracking objects within the last inactivity_threshold seconds
@@ -397,7 +388,7 @@ class BirdsEyeFrameManager:
                 for cam, cam_data in self.cameras.items()
                 if self.config.cameras[cam].birdseye.enabled
                 and cam_data["last_active_frame"] > 0
-                and cam_data["current_frame"] - cam_data["last_active_frame"]
+                and cam_data["current_frame_time"] - cam_data["last_active_frame"]
                 < self.inactivity_threshold
             ]
         )
@@ -414,7 +405,7 @@ class BirdsEyeFrameManager:
             limited_active_cameras = sorted(
                 active_cameras,
                 key=lambda active_camera: (
-                    self.cameras[active_camera]["current_frame"]
+                    self.cameras[active_camera]["current_frame_time"]
                     - self.cameras[active_camera]["last_active_frame"]
                 ),
             )
@@ -524,7 +515,9 @@ class BirdsEyeFrameManager:
             for row in self.camera_layout:
                 for position in row:
                     self.copy_to_position(
-                        position[1], position[0], self.cameras[position[0]]["current_frame"]
+                        position[1],
+                        position[0],
+                        self.cameras[position[0]]["current_frame"],
                     )
 
         return True
@@ -672,7 +665,14 @@ class BirdsEyeFrameManager:
         else:
             return standard_candidate_layout
 
-    def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
+    def update(
+        self,
+        camera: str,
+        object_count: int,
+        motion_count: int,
+        frame_time: float,
+        frame: np.ndarray,
+    ) -> bool:
         # don't process if birdseye is disabled for this camera
         camera_config = self.config.cameras[camera].birdseye
 
@@ -689,7 +689,8 @@ class BirdsEyeFrameManager:
             return False
 
         # update the last active frame for the camera
-        self.cameras[camera]["current_frame"] = frame_time
+        self.cameras[camera]["current_frame"] = frame.copy()
+        self.cameras[camera]["current_frame_time"] = frame_time
         if self.camera_active(camera_config.mode, object_count, motion_count):
             self.cameras[camera]["last_active_frame"] = frame_time
 
@@ -700,7 +701,7 @@ class BirdsEyeFrameManager:
             return False
 
         try:
-            updated_frame = self.update_frame()
+            updated_frame = self.update_frame(frame)
         except Exception:
             updated_frame = False
             self.active_cameras = []
@@ -737,12 +738,12 @@ class Birdseye:
         self.broadcaster = BroadcastThread(
             "birdseye", self.converter, websocket_server, stop_event
         )
-        frame_manager = SharedMemoryFrameManager()
-        self.birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
+        self.birdseye_manager = BirdsEyeFrameManager(config, stop_event)
         self.config_subscriber = ConfigSubscriber("config/birdseye/")
+        self.frame_manager = SharedMemoryFrameManager()
 
         if config.birdseye.restream:
-            self.birdseye_buffer = frame_manager.create(
+            self.birdseye_buffer = self.frame_manager.create(
                 "birdseye",
                 self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1],
             )
@@ -756,7 +757,7 @@ class Birdseye:
         current_tracked_objects: list[dict[str, any]],
         motion_boxes: list[list[int]],
         frame_time: float,
-        frame,
+        frame: np.ndarray,
     ) -> None:
         # check if there is an updated config
         while True:
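Birdseye now caches `frame.copy()` per camera instead of a timestamp. One plausible reason, sketched below: an ndarray built on `shm.buf` is only a view and must not be used once the mapping is closed, while a copy owns its memory:

import numpy as np
from multiprocessing import shared_memory

shm = shared_memory.SharedMemory(create=True, size=16)
view = np.ndarray((16,), dtype=np.uint8, buffer=shm.buf)  # view on the shared buffer
owned = view.copy()  # independent allocation, safe to keep around

del view  # release the exported buffer before closing
shm.close()
shm.unlink()
print(owned.sum())  # still valid after the mapping is gone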
@@ -63,6 +63,7 @@ def output_frames(
     birdseye: Optional[Birdseye] = None
     preview_recorders: dict[str, PreviewRecorder] = {}
     preview_write_times: dict[str, float] = {}
+    failed_frame_requests: dict[str, int] = {}
 
     move_preview_frames("cache")
 
@@ -87,19 +88,27 @@ def output_frames(
 
         (
             camera,
+            frame_name,
             frame_time,
             current_tracked_objects,
             motion_boxes,
-            regions,
+            _,
         ) = data
 
-        frame_id = f"{camera}{frame_time}"
-
-        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
+        frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
 
         if frame is None:
-            logger.debug(f"Failed to get frame {frame_id} from SHM")
+            logger.debug(f"Failed to get frame {frame_name} from SHM")
+            failed_frame_requests[camera] = failed_frame_requests.get(camera, 0) + 1
+
+            if failed_frame_requests[camera] > config.cameras[camera].detect.fps:
+                logger.warning(
+                    f"Failed to retrieve many frames for {camera} from SHM, consider increasing SHM size if this continues."
+                )
+
             continue
+        else:
+            failed_frame_requests[camera] = 0
 
         # send camera frame to ffmpeg process if websockets are connected
         if any(
@@ -134,12 +143,15 @@ def output_frames(
         # check for any cameras that are currently offline
         # and need to generate a preview
         if generated_preview:
+            logger.debug(
+                "Checking for offline cameras because another camera generated a preview."
+            )
             for camera, time in preview_write_times.copy().items():
                 if time != 0 and frame_time - time > 10:
                     preview_recorders[camera].flag_offline(frame_time)
                     preview_write_times[camera] = frame_time
 
-        frame_manager.close(frame_id)
+        frame_manager.close(frame_name)
 
     move_preview_frames("clips")
 
@@ -151,15 +163,15 @@ def output_frames(
 
         (
             camera,
+            frame_name,
             frame_time,
             current_tracked_objects,
             motion_boxes,
             regions,
         ) = data
 
-        frame_id = f"{camera}{frame_time}"
-        frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
-        frame_manager.close(frame_id)
+        frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
+        frame_manager.close(frame_name)
 
     detection_subscriber.stop()
 
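The failure counter above is a small reset-on-success pattern: count consecutive SHM misses per camera and warn once they exceed roughly a second's worth of frames. Sketched generically:

failed: dict[str, int] = {}


def on_frame(camera: str, frame, fps: int) -> None:
    if frame is None:
        failed[camera] = failed.get(camera, 0) + 1
        if failed[camera] > fps:  # ~1s of consecutive misses
            print(f"{camera}: many SHM misses; consider a larger /dev/shm")
        return
    failed[camera] = 0  # any success resets the streak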
@@ -78,7 +78,7 @@ class FFMpegConverter(threading.Thread):
         # write a PREVIEW at fps and 1 key frame per clip
         self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
             config.ffmpeg.ffmpeg_path,
-            config.ffmpeg.hwaccel_args,
+            "default",
             input="-f concat -y -protocol_whitelist pipe,file -safe 0 -threads 1 -i /dev/stdin",
             output=f"-threads 1 -g {PREVIEW_KEYFRAME_INTERVAL} -bf 0 -b:v {PREVIEW_QUALITY_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
             type=EncodeTypeEnum.preview,
@@ -154,6 +154,7 @@ class PreviewRecorder:
         self.start_time = 0
         self.last_output_time = 0
         self.output_frames = []
+
         if config.detect.width > config.detect.height:
             self.out_height = PREVIEW_HEIGHT
             self.out_width = (
@@ -274,7 +275,7 @@ class PreviewRecorder:
 
         return False
 
-    def write_frame_to_cache(self, frame_time: float, frame) -> None:
+    def write_frame_to_cache(self, frame_time: float, frame: np.ndarray) -> None:
         # resize yuv frame
         small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8)
         copy_yuv_to_position(
@@ -303,7 +304,7 @@ class PreviewRecorder:
         current_tracked_objects: list[dict[str, any]],
         motion_boxes: list[list[int]],
         frame_time: float,
-        frame,
+        frame: np.ndarray,
     ) -> bool:
         # check for updated record config
         _, updated_record_config = self.config_subscriber.check_for_update()
@@ -332,6 +333,10 @@ class PreviewRecorder:
                 self.output_frames,
                 self.requestor,
             ).start()
+        else:
+            logger.debug(
+                f"Not saving preview for {self.config.name} because there are no saved frames."
+            )
 
         # reset frame cache
         self.segment_end = (
@@ -59,7 +59,13 @@ class PtzMotionEstimator:
         self.ptz_metrics.reset.set()
         logger.debug(f"{config.name}: Motion estimator init")
 
-    def motion_estimator(self, detections, frame_time, camera):
+    def motion_estimator(
+        self,
+        detections: list[dict[str, any]],
+        frame_name: str,
+        frame_time: float,
+        camera: str,
+    ):
         # If we've just started up or returned to our preset, reset motion estimator for new tracking session
         if self.ptz_metrics.reset.is_set():
             self.ptz_metrics.reset.clear()
@@ -92,9 +98,8 @@ class PtzMotionEstimator:
                 f"{camera}: Motion estimator running - frame time: {frame_time}"
             )
 
-            frame_id = f"{camera}{frame_time}"
             yuv_frame = self.frame_manager.get(
-                frame_id, self.camera_config.frame_shape_yuv
+                frame_name, self.camera_config.frame_shape_yuv
             )
 
             if yuv_frame is None:
@@ -136,7 +141,7 @@ class PtzMotionEstimator:
             except Exception:
                 pass
 
-            self.frame_manager.close(frame_id)
+            self.frame_manager.close(frame_name)
 
         return self.coord_transformations
 
@@ -27,6 +27,7 @@ from frigate.ffmpeg_presets import (
     parse_preset_hardware_acceleration_encode,
 )
 from frigate.models import Export, Previews, Recordings
+from frigate.util.builtin import is_current_hour
 
 logger = logging.getLogger(__name__)
 
@@ -235,6 +236,32 @@ class RecordingExporter(threading.Thread):
 
     def get_preview_export_command(self, video_path: str) -> list[str]:
         playlist_lines = []
+        codec = "-c copy"
+
+        if is_current_hour(self.start_time):
+            # get list of current preview frames
+            preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+            file_start = f"preview_{self.camera}"
+            start_file = f"{file_start}-{self.start_time}.{PREVIEW_FRAME_TYPE}"
+            end_file = f"{file_start}-{self.end_time}.{PREVIEW_FRAME_TYPE}"
+
+            for file in sorted(os.listdir(preview_dir)):
+                if not file.startswith(file_start):
+                    continue
+
+                if file < start_file:
+                    continue
+
+                if file > end_file:
+                    break
+
+                playlist_lines.append(f"file '{os.path.join(preview_dir, file)}'")
+                playlist_lines.append("duration 0.12")
+
+            if playlist_lines:
+                last_file = playlist_lines[-2]
+                playlist_lines.append(last_file)
+                codec = "-c:v libx264"
 
         # get full set of previews
         export_previews = (
@@ -277,7 +304,7 @@ class RecordingExporter(threading.Thread):
 
         if self.playback_factor == PlaybackFactorEnum.realtime:
             ffmpeg_cmd = (
-                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
+                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}"
             ).split(" ")
         elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
             ffmpeg_cmd = (
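The export hunk builds an ffmpeg concat-demuxer playlist: a `file` directive followed by a `duration` for each cached preview frame. Appending `playlist_lines[-2]` (the last `file` line) repeats the final entry, which the concat demuxer needs for the last duration to take effect. The generated playlist looks roughly like this; paths and the frame extension are illustrative:

file '/tmp/cache/preview_frames/preview_front_door-1700000000.0.webp'
duration 0.12
file '/tmp/cache/preview_frames/preview_front_door-1700000008.1.webp'
duration 0.12
file '/tmp/cache/preview_frames/preview_front_door-1700000008.1.webp'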
@@ -299,16 +299,12 @@ class RecordingMaintainer(threading.Thread):
             # if it doesn't overlap with an event, go ahead and drop the segment
             # if it ends more than the configured pre_capture for the camera
             else:
-                pre_capture = max(
-                    record_config.alerts.pre_capture,
-                    record_config.detections.pre_capture,
-                )
                 camera_info = self.object_recordings_info[camera]
                 most_recently_processed_frame_time = (
                     camera_info[-1][0] if len(camera_info) > 0 else 0
                 )
                 retain_cutoff = datetime.datetime.fromtimestamp(
-                    most_recently_processed_frame_time - pre_capture
+                    most_recently_processed_frame_time - record_config.event_pre_capture
                 ).astimezone(datetime.timezone.utc)
                 if end_time < retain_cutoff:
                     Path(cache_path).unlink(missing_ok=True)
@@ -518,6 +514,7 @@ class RecordingMaintainer(threading.Thread):
         if topic == DetectionTypeEnum.video:
             (
                 camera,
+                _,
                 frame_time,
                 current_tracked_objects,
                 motion_boxes,
@@ -234,6 +234,7 @@ class ReviewSegmentMaintainer(threading.Thread):
     def update_existing_segment(
         self,
         segment: PendingReviewSegment,
+        frame_name: str,
         frame_time: float,
         objects: list[TrackedObject],
     ) -> None:
@@ -292,36 +293,34 @@ class ReviewSegmentMaintainer(threading.Thread):
 
         if should_update:
             try:
-                frame_id = f"{camera_config.name}{frame_time}"
                 yuv_frame = self.frame_manager.get(
-                    frame_id, camera_config.frame_shape_yuv
+                    frame_name, camera_config.frame_shape_yuv
                 )
 
                 if yuv_frame is None:
-                    logger.debug(f"Failed to get frame {frame_id} from SHM")
+                    logger.debug(f"Failed to get frame {frame_name} from SHM")
                     return
 
                 self._publish_segment_update(
                     segment, camera_config, yuv_frame, active_objects, prev_data
                 )
-                self.frame_manager.close(frame_id)
+                self.frame_manager.close(frame_name)
             except FileNotFoundError:
                 return
 
         if not has_activity:
             if not segment.has_frame:
                 try:
-                    frame_id = f"{camera_config.name}{frame_time}"
                     yuv_frame = self.frame_manager.get(
-                        frame_id, camera_config.frame_shape_yuv
+                        frame_name, camera_config.frame_shape_yuv
                     )
 
                     if yuv_frame is None:
-                        logger.debug(f"Failed to get frame {frame_id} from SHM")
+                        logger.debug(f"Failed to get frame {frame_name} from SHM")
                         return
 
                     segment.save_full_frame(camera_config, yuv_frame)
-                    self.frame_manager.close(frame_id)
+                    self.frame_manager.close(frame_name)
                     self._publish_segment_update(
                         segment, camera_config, None, [], prev_data
                     )
@@ -338,6 +337,7 @@ class ReviewSegmentMaintainer(threading.Thread):
     def check_if_new_segment(
         self,
         camera: str,
+        frame_name: str,
         frame_time: float,
         objects: list[TrackedObject],
     ) -> None:
@@ -414,19 +414,18 @@ class ReviewSegmentMaintainer(threading.Thread):
             )
 
             try:
-                frame_id = f"{camera_config.name}{frame_time}"
                 yuv_frame = self.frame_manager.get(
-                    frame_id, camera_config.frame_shape_yuv
+                    frame_name, camera_config.frame_shape_yuv
                 )
 
                 if yuv_frame is None:
-                    logger.debug(f"Failed to get frame {frame_id} from SHM")
+                    logger.debug(f"Failed to get frame {frame_name} from SHM")
                     return
 
                 self.active_review_segments[camera].update_frame(
                     camera_config, yuv_frame, active_objects
                 )
-                self.frame_manager.close(frame_id)
+                self.frame_manager.close(frame_name)
                 self._publish_segment_start(self.active_review_segments[camera])
             except FileNotFoundError:
                 return
@@ -454,16 +453,17 @@ class ReviewSegmentMaintainer(threading.Thread):
                 if topic == DetectionTypeEnum.video:
                     (
                         camera,
+                        frame_name,
                         frame_time,
                         current_tracked_objects,
-                        motion_boxes,
-                        regions,
+                        _,
+                        _,
                     ) = data
                 elif topic == DetectionTypeEnum.audio:
                     (
                         camera,
                         frame_time,
-                        dBFS,
+                        _,
                         audio_detections,
                     ) = data
                 elif topic == DetectionTypeEnum.api:
@@ -488,6 +488,7 @@ class ReviewSegmentMaintainer(threading.Thread):
                 if topic == DetectionTypeEnum.video:
                     self.update_existing_segment(
                         current_segment,
+                        frame_name,
                        frame_time,
                        current_tracked_objects,
                    )
@@ -538,6 +539,7 @@ class ReviewSegmentMaintainer(threading.Thread):
                 if topic == DetectionTypeEnum.video:
                     self.check_if_new_segment(
                         camera,
+                        frame_name,
                         frame_time,
                         current_tracked_objects,
                     )
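With `frame_name` inserted, the video-topic payload is a six-element tuple, and the publisher and every subscriber must stay in lockstep; unused fields conventionally go to `_`. A tiny sketch with placeholder values:

# (camera, frame_name, frame_time, tracked_objects, motion_boxes, regions)
payload = ("front_door", "front_door_frame0", 1700000000.0, [], [], [])

camera, frame_name, frame_time, tracked_objects, _, _ = payload
assert frame_name == "front_door_frame0"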
frigate/test/http_api/__init__.py (new file, 0 lines)

frigate/test/http_api/base_http_test.py (new file, 162 lines)
@@ -0,0 +1,162 @@
+import datetime
+import logging
+import os
+import unittest
+
+from peewee_migrate import Router
+from playhouse.sqlite_ext import SqliteExtDatabase
+from playhouse.sqliteq import SqliteQueueDatabase
+
+from frigate.api.fastapi_app import create_fastapi_app
+from frigate.config import FrigateConfig
+from frigate.models import Event, ReviewSegment
+from frigate.review.maintainer import SeverityEnum
+from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
+
+
+class BaseTestHttp(unittest.TestCase):
+    def setUp(self, models):
+        # setup clean database for each test run
+        migrate_db = SqliteExtDatabase("test.db")
+        del logging.getLogger("peewee_migrate").handlers[:]
+        router = Router(migrate_db)
+        router.run()
+        migrate_db.close()
+        self.db = SqliteQueueDatabase(TEST_DB)
+        self.db.bind(models)
+
+        self.minimal_config = {
+            "mqtt": {"host": "mqtt"},
+            "cameras": {
+                "front_door": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+        self.test_stats = {
+            "detection_fps": 13.7,
+            "detectors": {
+                "cpu1": {
+                    "detection_start": 0.0,
+                    "inference_speed": 91.43,
+                    "pid": 42,
+                },
+                "cpu2": {
+                    "detection_start": 0.0,
+                    "inference_speed": 84.99,
+                    "pid": 44,
+                },
+            },
+            "front_door": {
+                "camera_fps": 0.0,
+                "capture_pid": 53,
+                "detection_fps": 0.0,
+                "pid": 52,
+                "process_fps": 0.0,
+                "skipped_fps": 0.0,
+            },
+            "service": {
+                "storage": {
+                    "/dev/shm": {
+                        "free": 50.5,
+                        "mount_type": "tmpfs",
+                        "total": 67.1,
+                        "used": 16.6,
+                    },
+                    "/media/frigate/clips": {
+                        "free": 42429.9,
+                        "mount_type": "ext4",
+                        "total": 244529.7,
+                        "used": 189607.0,
+                    },
+                    "/media/frigate/recordings": {
+                        "free": 0.2,
+                        "mount_type": "ext4",
+                        "total": 8.0,
+                        "used": 7.8,
+                    },
+                    "/tmp/cache": {
+                        "free": 976.8,
+                        "mount_type": "tmpfs",
+                        "total": 1000.0,
+                        "used": 23.2,
+                    },
+                },
+                "uptime": 101113,
+                "version": "0.10.1",
+                "latest_version": "0.11",
+            },
+        }
+
+    def tearDown(self):
+        if not self.db.is_closed():
+            self.db.close()
+
+        try:
+            for file in TEST_DB_CLEANUPS:
+                os.remove(file)
+        except OSError:
+            pass
+
+    def create_app(self, stats=None):
+        return create_fastapi_app(
+            FrigateConfig(**self.minimal_config),
+            self.db,
+            None,
+            None,
+            None,
+            None,
+            None,
+            stats,
+            None,
+        )
+
+    def insert_mock_event(
+        self,
+        id: str,
+        start_time: datetime.datetime = datetime.datetime.now().timestamp(),
+    ) -> Event:
+        """Inserts a basic event model with a given id."""
+        return Event.insert(
+            id=id,
+            label="Mock",
+            camera="front_door",
+            start_time=start_time,
+            end_time=start_time + 20,
+            top_score=100,
+            false_positive=False,
+            zones=list(),
+            thumbnail="",
+            region=[],
+            box=[],
+            area=0,
+            has_clip=True,
+            has_snapshot=True,
+        ).execute()
+
+    def insert_mock_review_segment(
+        self,
+        id: str,
+        start_time: datetime.datetime = datetime.datetime.now().timestamp(),
+        end_time: datetime.datetime = datetime.datetime.now().timestamp() + 20,
+    ) -> Event:
+        """Inserts a basic event model with a given id."""
+        return ReviewSegment.insert(
+            id=id,
+            camera="front_door",
+            start_time=start_time,
+            end_time=end_time,
+            has_been_reviewed=False,
+            severity=SeverityEnum.alert,
+            thumb_path=False,
+            data={},
+        ).execute()

frigate/test/http_api/test_http_review.py (new file, 110 lines)
@@ -0,0 +1,110 @@
import datetime

from fastapi.testclient import TestClient

from frigate.models import Event, ReviewSegment
from frigate.test.http_api.base_http_test import BaseTestHttp


class TestHttpReview(BaseTestHttp):
    def setUp(self):
        super().setUp([Event, ReviewSegment])

    # Does not return any data since the end time (before parameter) is not passed
    # and the review segment's end_time is 2 seconds from now
    def test_get_review_no_filters_no_matches(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            super().insert_mock_review_segment("123456.random", now, now + 2)
            reviews_response = client.get("/review")
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 0

    def test_get_review_no_filters(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            super().insert_mock_review_segment("123456.random", now - 2, now - 1)
            reviews_response = client.get("/review")
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 1

    def test_get_review_with_time_filter_no_matches(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
                "after": now,
                "before": now + 3,
            }
            reviews_response = client.get("/review", params=params)
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 0

    def test_get_review_with_time_filter(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
                "after": now - 1,
                "before": now + 3,
            }
            reviews_response = client.get("/review", params=params)
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 1
            assert reviews_in_response[0]["id"] == id

    def test_get_review_with_limit_filter(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            id = "123456.random"
            id2 = "654321.random"
            super().insert_mock_review_segment(id, now, now + 2)
            super().insert_mock_review_segment(id2, now + 1, now + 2)
            params = {
                "limit": 1,
                "after": now,
                "before": now + 3,
            }
            reviews_response = client.get("/review", params=params)
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 1
            assert reviews_in_response[0]["id"] == id2

    def test_get_review_with_all_filters(self):
        app = super().create_app()
        now = datetime.datetime.now().timestamp()

        with TestClient(app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
                "cameras": "front_door",
                "labels": "all",
                "zones": "all",
                "reviewed": 0,
                "limit": 1,
                "severity": "alert",
                "after": now - 1,
                "before": now + 3,
            }
            reviews_response = client.get("/review", params=params)
            assert reviews_response.status_code == 200
            reviews_in_response = reviews_response.json()
            assert len(reviews_in_response) == 1
            assert reviews_in_response[0]["id"] == id
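One way to run the new suite locally (assuming the frigate package is importable from the repo root):

    # run the review API tests with unittest's CLI runner
    import unittest

    if __name__ == "__main__":
        unittest.main(module="frigate.test.http_api.test_http_review", verbosity=2)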
@@ -9,5 +9,7 @@ class ObjectTracker(ABC):
         pass

     @abstractmethod
-    def match_and_update(self, frame_time: float, detections) -> None:
+    def match_and_update(
+        self, frame_name: str, frame_time: float, detections: list[dict[str, any]]
+    ) -> None:
         pass

@@ -129,7 +129,7 @@ class CentroidTracker(ObjectTracker):
         self.tracked_objects[id].update(new_obj)

-    def update_frame_times(self, frame_time):
+    def update_frame_times(self, frame_name, frame_time):
         for id in list(self.tracked_objects.keys()):
             self.tracked_objects[id]["frame_time"] = frame_time
             self.tracked_objects[id]["motionless_count"] += 1

@@ -268,7 +268,7 @@ class NorfairTracker(ObjectTracker):
         self.tracked_objects[id].update(obj)

-    def update_frame_times(self, frame_time):
+    def update_frame_times(self, frame_name: str, frame_time: float):
         # if the object was there in the last frame, assume it's still there
         detections = [
             (

@@ -282,9 +282,11 @@ class NorfairTracker(ObjectTracker):
             for id, obj in self.tracked_objects.items()
             if self.disappeared[id] == 0
         ]
-        self.match_and_update(frame_time, detections=detections)
+        self.match_and_update(frame_name, frame_time, detections=detections)

-    def match_and_update(self, frame_time, detections):
+    def match_and_update(
+        self, frame_name: str, frame_time: float, detections: list[dict[str, any]]
+    ):
         norfair_detections = []

         for obj in detections:

@@ -322,7 +324,7 @@ class NorfairTracker(ObjectTracker):
         )

         coord_transformations = self.ptz_motion_estimator.motion_estimator(
-            detections, frame_time, self.camera_name
+            detections, frame_name, frame_time, self.camera_name
         )

         tracked_objects = self.tracker.update(
@@ -4,6 +4,7 @@ import base64
 import logging
 from collections import defaultdict
 from statistics import median
+from typing import Optional

 import cv2
 import numpy as np

@@ -423,10 +424,11 @@ class TrackedObjectAttribute:
             "box": self.box,
         }

-    def find_best_object(self, objects: list[dict[str, any]]) -> str:
+    def find_best_object(self, objects: list[dict[str, any]]) -> Optional[str]:
         """Find the best attribute for each object and return its ID."""
         best_object_area = None
         best_object_id = None
+        best_object_label = None

         for obj in objects:
             if not box_inside(obj["box"], self.box):

@@ -440,8 +442,15 @@ class TrackedObjectAttribute:
             if best_object_area is None:
                 best_object_area = object_area
                 best_object_id = obj["id"]
-            elif object_area < best_object_area:
-                best_object_area = object_area
-                best_object_id = obj["id"]
+                best_object_label = obj["label"]
+            else:
+                if best_object_label == "car" and obj["label"] == "car":
+                    # if multiple cars are overlapping with the same label then the label will not be assigned
+                    return None
+                elif object_area < best_object_area:
+                    # if a car and person are overlapping then assign the label to the smaller object (which should be the person)
+                    best_object_area = object_area
+                    best_object_id = obj["id"]
+                    best_object_label = obj["label"]

         return best_object_id
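A standalone sketch of the new assignment rule (a hypothetical restatement, not the class itself, and it omits the box-containment check): two overlapping cars yield no assignment, while a smaller overlapping person wins the label.

    from typing import Optional

    def pick_best(objects: list[dict]) -> Optional[str]:
        # minimal restatement of find_best_object's selection rule
        best_area, best_id, best_label = None, None, None
        for obj in objects:
            area = obj["area"]
            if best_area is None:
                best_area, best_id, best_label = area, obj["id"], obj["label"]
            elif best_label == "car" and obj["label"] == "car":
                return None  # two overlapping cars: ambiguous, assign nothing
            elif area < best_area:
                best_area, best_id, best_label = area, obj["id"], obj["label"]
        return best_id

    assert pick_best([{"id": "a", "label": "car", "area": 100},
                      {"id": "b", "label": "car", "area": 90}]) is None
    assert pick_best([{"id": "a", "label": "car", "area": 100},
                      {"id": "p", "label": "person", "area": 30}]) == "p"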
@@ -19,3 +19,7 @@ class ModelStatusTypesEnum(str, Enum):
     downloading = "downloading"
     downloaded = "downloaded"
     error = "error"
+
+
+class TrackedObjectUpdateTypesEnum(str, Enum):
+    description = "description"
@@ -282,6 +282,17 @@ def get_tomorrow_at_time(hour: int) -> datetime.datetime:
     )


+def is_current_hour(timestamp: int) -> bool:
+    """Returns if timestamp is in the current UTC hour."""
+    start_of_next_hour = (
+        datetime.datetime.now(datetime.timezone.utc).replace(
+            minute=0, second=0, microsecond=0
+        )
+        + datetime.timedelta(hours=1)
+    ).timestamp()
+    return timestamp < start_of_next_hour
+
+
 def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
     """clear file then unlink to avoid space retained by file descriptors."""
     if not missing_ok and not file.exists():
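Note that is_current_hour only checks the upper bound, so any past timestamp also passes; a quick sketch of its behavior (module path assumed):

    import datetime

    from frigate.util.builtin import is_current_hour  # assumed location

    now_ts = datetime.datetime.now(datetime.timezone.utc).timestamp()
    assert is_current_hour(now_ts)  # inside the current hour
    assert is_current_hour(0)  # also True: only "before the next hour" is tested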
@@ -29,6 +29,10 @@ def migrate_frigate_config(config_file: str):
     with open(config_file, "r") as f:
         config: dict[str, dict[str, any]] = yaml.load(f)

+    if config is None:
+        logger.error(f"Failed to load config at {config_file}")
+        return
+
     previous_version = str(config.get("version", "0.13"))

     if previous_version == CURRENT_CONFIG_VERSION:

@@ -46,14 +50,15 @@ def migrate_frigate_config(config_file: str):
         previous_version = "0.14"

         logger.info("Migrating export file names...")
-        for file in os.listdir(EXPORT_DIR):
-            if "@" not in file:
-                continue
+        if os.path.isdir(EXPORT_DIR):
+            for file in os.listdir(EXPORT_DIR):
+                if "@" not in file:
+                    continue

-            new_name = file.replace("@", "_")
-            os.rename(
-                os.path.join(EXPORT_DIR, file), os.path.join(EXPORT_DIR, new_name)
-            )
+                new_name = file.replace("@", "_")
+                os.rename(
+                    os.path.join(EXPORT_DIR, file), os.path.join(EXPORT_DIR, new_name)
+                )

     if previous_version < "0.15-0":
         logger.info(f"Migrating frigate config from {previous_version} to 0.15-0...")
@@ -3,8 +3,10 @@
 import datetime
 import logging
 import subprocess as sp
+import threading
 from abc import ABC, abstractmethod
-from multiprocessing import shared_memory
+from multiprocessing import resource_tracker as _mprt
+from multiprocessing import shared_memory as _mpshm
 from string import printable
 from typing import AnyStr, Optional

@@ -715,57 +717,109 @@ def clipped(obj, frame_shape):

 class FrameManager(ABC):
     @abstractmethod
-    def create(self, name, size) -> AnyStr:
+    def create(self, name: str, size: int) -> AnyStr:
         pass

     @abstractmethod
-    def get(self, name, timeout_ms=0):
+    def write(self, name: str) -> memoryview:
         pass

     @abstractmethod
-    def close(self, name):
+    def get(self, name: str, timeout_ms: int = 0):
         pass

     @abstractmethod
-    def delete(self, name):
+    def close(self, name: str):
+        pass
+
+    @abstractmethod
+    def delete(self, name: str):
+        pass
+
+    @abstractmethod
+    def cleanup(self):
         pass


-class DictFrameManager(FrameManager):
-    def __init__(self):
-        self.frames = {}
+class UntrackedSharedMemory(_mpshm.SharedMemory):
+    # https://github.com/python/cpython/issues/82300#issuecomment-2169035092

-    def create(self, name, size) -> AnyStr:
-        mem = bytearray(size)
-        self.frames[name] = mem
-        return mem
+    __lock = threading.Lock()

-    def get(self, name, shape):
-        mem = self.frames[name]
-        return np.ndarray(shape, dtype=np.uint8, buffer=mem)
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        create: bool = False,
+        size: int = 0,
+        *,
+        track: bool = False,
+    ) -> None:
+        self._track = track

-    def close(self, name):
-        pass
+        # if tracking, normal init will suffice
+        if track:
+            return super().__init__(name=name, create=create, size=size)

-    def delete(self, name):
-        del self.frames[name]
+        # lock so that other threads don't attempt to use the
+        # register function during this time
+        with self.__lock:
+            # temporarily disable registration during initialization
+            orig_register = _mprt.register
+            _mprt.register = self.__tmp_register
+
+            # initialize; ensure original register function is
+            # re-instated
+            try:
+                super().__init__(name=name, create=create, size=size)
+            finally:
+                _mprt.register = orig_register
+
+    @staticmethod
+    def __tmp_register(*args, **kwargs) -> None:
+        return
+
+    def unlink(self) -> None:
+        if _mpshm._USE_POSIX and self._name:
+            _mpshm._posixshmem.shm_unlink(self._name)
+            if self._track:
+                _mprt.unregister(self._name, "shared_memory")


 class SharedMemoryFrameManager(FrameManager):
     def __init__(self):
-        self.shm_store: dict[str, shared_memory.SharedMemory] = {}
+        self.shm_store: dict[str, UntrackedSharedMemory] = {}

     def create(self, name: str, size) -> AnyStr:
-        shm = shared_memory.SharedMemory(name=name, create=True, size=size)
+        try:
+            shm = UntrackedSharedMemory(
+                name=name,
+                create=True,
+                size=size,
+            )
+        except FileExistsError:
+            shm = UntrackedSharedMemory(name=name)
+
         self.shm_store[name] = shm
         return shm.buf

+    def write(self, name: str) -> memoryview:
+        try:
+            if name in self.shm_store:
+                shm = self.shm_store[name]
+            else:
+                shm = UntrackedSharedMemory(name=name)
+                self.shm_store[name] = shm
+            return shm.buf
+        except FileNotFoundError:
+            logger.info(f"the file {name} not found")
+            return None
+
     def get(self, name: str, shape) -> Optional[np.ndarray]:
         try:
             if name in self.shm_store:
                 shm = self.shm_store[name]
             else:
-                shm = shared_memory.SharedMemory(name=name)
+                shm = UntrackedSharedMemory(name=name)
                 self.shm_store[name] = shm
             return np.ndarray(shape, dtype=np.uint8, buffer=shm.buf)
         except FileNotFoundError:

@@ -788,12 +842,21 @@ class SharedMemoryFrameManager(FrameManager):
             del self.shm_store[name]
         else:
             try:
-                shm = shared_memory.SharedMemory(name=name)
+                shm = UntrackedSharedMemory(name=name)
                 shm.close()
                 shm.unlink()
             except FileNotFoundError:
                 pass

+    def cleanup(self) -> None:
+        for shm in self.shm_store.values():
+            shm.close()
+
+            try:
+                shm.unlink()
+            except FileNotFoundError:
+                pass
+

 def create_mask(frame_shape, mask):
     mask_img = np.zeros(frame_shape, np.uint8)
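The untracked subclass exists because of the linked CPython issue: on POSIX, every process that merely attaches to a segment registers it with its own resource_tracker, which then unlinks the segment when that process exits, pulling it out from under the creator. A minimal sketch of the failure mode with the stock class (segment name illustrative):

    from multiprocessing import shared_memory

    # the creator process allocates the segment
    creator = shared_memory.SharedMemory(name="frame0", create=True, size=16)

    # an attaching process using the stock class also registers "frame0"
    # with its resource_tracker; at that process's exit the tracker will
    # unlink the segment even though the creator still owns it, which is
    # the bug the subclass works around
    attached = shared_memory.SharedMemory(name="frame0")

    attached.close()
    creator.close()
    creator.unlink()  # only the creator should ever unlink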
@@ -1,5 +1,6 @@
 """Model Utils"""

+import logging
 import os
 from typing import Any

@@ -11,9 +12,11 @@ except ImportError:
     # openvino is not included
     pass

+logger = logging.getLogger(__name__)
+
+
 def get_ort_providers(
-    force_cpu: bool = False, openvino_device: str = "AUTO", requires_fp16: bool = False
+    force_cpu: bool = False, device: str = "AUTO", requires_fp16: bool = False
 ) -> tuple[list[str], list[dict[str, any]]]:
     if force_cpu:
         return (

@@ -30,15 +33,36 @@ def get_ort_providers(

     for provider in ort.get_available_providers():
         if provider == "CUDAExecutionProvider":
+            device_id = 0 if not device.isdigit() else int(device)
             providers.append(provider)
             options.append(
                 {
                     "arena_extend_strategy": "kSameAsRequested",
+                    "device_id": device_id,
                 }
             )
         elif provider == "TensorrtExecutionProvider":
             # TensorrtExecutionProvider uses too much memory without options to control it
-            pass
+            # so it is not enabled by default
+            if device == "Tensorrt":
+                os.makedirs(
+                    "/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True
+                )
+                device_id = 0 if not device.isdigit() else int(device)
+                providers.append(provider)
+                options.append(
+                    {
+                        "device_id": device_id,
+                        "trt_fp16_enable": requires_fp16
+                        and os.environ.get("USE_FP_16", "True") != "False",
+                        "trt_timing_cache_enable": True,
+                        "trt_engine_cache_enable": True,
+                        "trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
+                        "trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
+                    }
+                )
+            else:
+                continue
         elif provider == "OpenVINOExecutionProvider":
             os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
             providers.append(provider)

@@ -46,7 +70,7 @@ def get_ort_providers(
                 {
                     "arena_extend_strategy": "kSameAsRequested",
                     "cache_dir": "/config/model_cache/openvino/ort",
-                    "device_type": openvino_device,
+                    "device_type": device,
                 }
             )
         elif provider == "CPUExecutionProvider":
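Downstream, the providers/options pair plugs straight into ONNX Runtime; a sketch (import path and model path assumed):

    import onnxruntime as ort

    from frigate.util.model import get_ort_providers  # assumed location

    providers, options = get_ort_providers(force_cpu=False, device="0")
    session = ort.InferenceSession(
        "/config/model_cache/example.onnx",  # hypothetical model path
        providers=providers,
        provider_options=options,
    )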
@@ -71,19 +95,27 @@ class ONNXModelRunner:
         self.ort: ort.InferenceSession = None
         self.ov: ov.Core = None
         providers, options = get_ort_providers(device == "CPU", device, requires_fp16)
+        self.interpreter = None

         if "OpenVINOExecutionProvider" in providers:
-            # use OpenVINO directly
-            self.type = "ov"
-            self.ov = ov.Core()
-            self.ov.set_property(
-                {ov.properties.cache_dir: "/config/model_cache/openvino"}
-            )
-            self.interpreter = self.ov.compile_model(
-                model=model_path, device_name=device
-            )
-        else:
-            # Use ONNXRuntime
+            try:
+                # use OpenVINO directly
+                self.type = "ov"
+                self.ov = ov.Core()
+                self.ov.set_property(
+                    {ov.properties.cache_dir: "/config/model_cache/openvino"}
+                )
+                self.interpreter = self.ov.compile_model(
+                    model=model_path, device_name=device
+                )
+            except Exception as e:
+                logger.warning(
+                    f"OpenVINO failed to build model, using CPU instead: {e}"
+                )
+                self.interpreter = None
+
+        # Use ONNXRuntime
+        if self.interpreter is None:
             self.type = "ort"
             self.ort = ort.InferenceSession(
                 model_path,
@@ -584,7 +584,7 @@ async def get_video_properties(
     width = height = 0

     try:
-        # Open the video stream
+        # Open the video stream using OpenCV
         video = cv2.VideoCapture(url)

         # Check if the video stream was opened successfully
@@ -94,8 +94,8 @@ def capture_frames(
     ffmpeg_process,
     config: CameraConfig,
     shm_frame_count: int,
-    shm_frames: list[str],
-    frame_shape,
+    frame_index: int,
+    frame_shape: tuple[int, int],
     frame_manager: FrameManager,
     frame_queue,
     fps: mp.Value,

@@ -113,21 +113,11 @@ def capture_frames(
         fps.value = frame_rate.eps()
         skipped_fps.value = skipped_eps.eps()
         current_frame.value = datetime.datetime.now().timestamp()
-        frame_name = f"{config.name}{current_frame.value}"
-        frame_buffer = frame_manager.create(frame_name, frame_size)
+        frame_name = f"{config.name}{frame_index}"
+        frame_buffer = frame_manager.write(frame_name)
         try:
             frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
-
-            # update frame cache and cleanup existing frames
-            shm_frames.append(frame_name)
-
-            if len(shm_frames) > shm_frame_count:
-                expired_frame_name = shm_frames.pop(0)
-                frame_manager.delete(expired_frame_name)
         except Exception:
-            # always delete the frame
-            frame_manager.delete(frame_name)
-
             # shutdown has been initiated
             if stop_event.is_set():
                 break

@@ -147,12 +137,14 @@ def capture_frames(
         # don't lock the queue to check, just try since it should rarely be full
         try:
             # add to the queue
-            frame_queue.put(current_frame.value, False)
+            frame_queue.put((frame_name, current_frame.value), False)
             frame_manager.close(frame_name)
         except queue.Full:
             # if the queue is full, skip this frame
             skipped_eps.update()

+        frame_index = 0 if frame_index == shm_frame_count - 1 else frame_index + 1
+

 class CameraWatchdog(threading.Thread):
     def __init__(
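With the per-timestamp shm names gone, capture now cycles through a fixed ring of shm_frame_count buffers; the conditional at the end of the loop is a plain modular increment. An equivalent sketch:

    shm_frame_count = 4
    frame_index = 0
    names = []
    for _ in range(10):
        names.append(f"front_door{frame_index}")
        # same advance as the diff's conditional, written as a modulus
        frame_index = (frame_index + 1) % shm_frame_count

    assert names[0] == names[4] == "front_door0"  # buffers are reused in order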
@@ -160,7 +152,7 @@ class CameraWatchdog(threading.Thread):
         camera_name,
         config: CameraConfig,
         shm_frame_count: int,
-        frame_queue,
+        frame_queue: mp.Queue,
         camera_fps,
         skipped_fps,
         ffmpeg_pid,

@@ -171,7 +163,6 @@ class CameraWatchdog(threading.Thread):
         self.camera_name = camera_name
         self.config = config
         self.shm_frame_count = shm_frame_count
-        self.shm_frames: list[str] = []
         self.capture_thread = None
         self.ffmpeg_detect_process = None
         self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")

@@ -183,6 +174,7 @@ class CameraWatchdog(threading.Thread):
         self.frame_shape = self.config.frame_shape_yuv
         self.frame_size = self.frame_shape[0] * self.frame_shape[1]
         self.fps_overflow_count = 0
+        self.frame_index = 0
         self.stop_event = stop_event
         self.sleeptime = self.config.ffmpeg.retry_interval

@@ -304,7 +296,7 @@ class CameraWatchdog(threading.Thread):
         self.capture_thread = CameraCapture(
             self.config,
             self.shm_frame_count,
-            self.shm_frames,
+            self.frame_index,
             self.ffmpeg_detect_process,
             self.frame_shape,
             self.frame_queue,

@@ -345,10 +337,10 @@ class CameraCapture(threading.Thread):
         self,
         config: CameraConfig,
         shm_frame_count: int,
-        shm_frames: list[str],
+        frame_index: int,
         ffmpeg_process,
-        frame_shape,
-        frame_queue,
+        frame_shape: tuple[int, int],
+        frame_queue: mp.Queue,
         fps,
         skipped_fps,
         stop_event,

@@ -357,7 +349,7 @@ class CameraCapture(threading.Thread):
         self.name = f"capture:{config.name}"
         self.config = config
         self.shm_frame_count = shm_frame_count
-        self.shm_frames = shm_frames
+        self.frame_index = frame_index
         self.frame_shape = frame_shape
         self.frame_queue = frame_queue
         self.fps = fps

@@ -373,7 +365,7 @@ class CameraCapture(threading.Thread):
             self.ffmpeg_process,
             self.config,
             self.shm_frame_count,
-            self.shm_frames,
+            self.frame_index,
             self.frame_shape,
             self.frame_manager,
             self.frame_queue,

@@ -479,8 +471,8 @@ def track_camera(
     # empty the frame queue
     logger.info(f"{name}: emptying frame queue")
     while not frame_queue.empty():
-        frame_time = frame_queue.get(False)
-        frame_manager.delete(f"{name}{frame_time}")
+        (frame_name, _) = frame_queue.get(False)
+        frame_manager.delete(frame_name)

     logger.info(f"{name}: exiting subprocess")

@@ -576,9 +568,9 @@ def process_frames(

         try:
             if exit_on_empty:
-                frame_time = frame_queue.get(False)
+                frame_name, frame_time = frame_queue.get(False)
             else:
-                frame_time = frame_queue.get(True, 1)
+                frame_name, frame_time = frame_queue.get(True, 1)
         except queue.Empty:
             if exit_on_empty:
                 logger.info("Exiting track_objects...")

@@ -588,9 +580,7 @@ def process_frames(
         camera_metrics.detection_frame.value = frame_time
         ptz_metrics.frame_time.value = frame_time

-        frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
-        )
+        frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1]))

         if frame is None:
             logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.")

@@ -604,7 +594,7 @@ def process_frames(

         # if detection is disabled
         if not detect_config.enabled:
-            object_tracker.match_and_update(frame_time, [])
+            object_tracker.match_and_update(frame_name, frame_time, [])
         else:
             # get stationary object ids
             # check every Nth frame for stationary objects

@@ -728,10 +718,12 @@ def process_frames(
                     if d[0] not in model_config.all_attributes
                 ]
                 # now that we have refined our detections, we need to track objects
-                object_tracker.match_and_update(frame_time, tracked_detections)
+                object_tracker.match_and_update(
+                    frame_name, frame_time, tracked_detections
+                )
             # else, just update the frame times for the stationary objects
             else:
-                object_tracker.update_frame_times(frame_time)
+                object_tracker.update_frame_times(frame_name, frame_time)

         # group the attribute detections based on what label they apply to
         attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}

@@ -836,7 +828,7 @@ def process_frames(
         )
         # add to the queue if not full
         if detected_objects_queue.full():
-            frame_manager.delete(f"{camera_name}{frame_time}")
+            frame_manager.close(frame_name)
             continue
         else:
             fps_tracker.update()

@@ -844,6 +836,7 @@ def process_frames(
         detected_objects_queue.put(
             (
                 camera_name,
+                frame_name,
                 frame_time,
                 detections,
                 motion_boxes,

@@ -851,7 +844,7 @@ def process_frames(
             )
         )
         camera_metrics.detection_fps.value = object_detector.fps.eps()
-        frame_manager.close(f"{camera_name}{frame_time}")
+        frame_manager.close(frame_name)

     motion_detector.stop()
     requestor.stop()
36  migrations/027_create_explore_index.py  Normal file
@@ -0,0 +1,36 @@
"""Peewee migrations -- 027_create_explore_index.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['model_name']            # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.python(func, *args, **kwargs)        # Run python code
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.drop_index(model, *col_names)
    > migrator.add_not_null(model, *field_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)

"""

import peewee as pw

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "event_label_start_time" ON "event" ("label", "start_time" DESC)'
    )


def rollback(migrator, database, fake=False, **kwargs):
    migrator.sql('DROP INDEX IF EXISTS "event_label_start_time"')
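The composite index serves label-grouped, recency-ordered lookups (presumably for the Explore view, per the migration name). A sketch of the query shape it accelerates, runnable against a scratch SQLite database:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE event (id TEXT, label TEXT, start_time REAL)")
    conn.execute(
        'CREATE INDEX IF NOT EXISTS "event_label_start_time" '
        'ON "event" ("label", "start_time" DESC)'
    )

    # newest events for one label: the access pattern the index serves
    plan = conn.execute(
        "EXPLAIN QUERY PLAN "
        "SELECT id FROM event WHERE label = ? ORDER BY start_time DESC LIMIT 10",
        ("person",),
    ).fetchall()
    print(plan)  # the plan should reference event_label_start_time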
@@ -69,7 +69,10 @@ function useValue(): useValueReturn {
       ...prevState,
       ...cameraStates,
     }));
-    setHasCameraState(true);
+
+    if (Object.keys(cameraStates).length > 0) {
+      setHasCameraState(true);
+    }
     // we only want this to run initially when the config is loaded
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [wsState]);

@@ -93,6 +96,9 @@ function useValue(): useValueReturn {
         retain: false,
       });
     },
+    onClose: () => {
+      setHasCameraState(false);
+    },
     shouldReconnect: () => true,
     retryOnError: true,
   });

@@ -401,9 +407,9 @@ export function useImproveContrast(camera: string): {
   return { payload: payload as ToggleableSetting, send };
 }

-export function useEventUpdate(): { payload: string } {
+export function useTrackedObjectUpdate(): { payload: string } {
   const {
     value: { payload },
-  } = useWs("event_update", "");
+  } = useWs("tracked_object_update", "");
   return useDeepMemo(JSON.parse(payload as string));
 }
@@ -1,191 +0,0 @@
-import { FrigateConfig } from "@/types/frigateConfig";
-import { GraphDataPoint } from "@/types/graph";
-import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
-import useSWR from "swr";
-import ActivityIndicator from "../indicators/activity-indicator";
-
-type TimelineBarProps = {
-  startTime: number;
-  graphData:
-    | {
-        objects: number[];
-        motion: GraphDataPoint[];
-      }
-    | undefined;
-  onClick?: () => void;
-};
-export default function TimelineBar({
-  startTime,
-  graphData,
-  onClick,
-}: TimelineBarProps) {
-  const { data: config } = useSWR<FrigateConfig>("config");
-
-  if (!config) {
-    return <ActivityIndicator />;
-  }
-
-  return (
-    <div
-      className="h-18 my-1 w-full cursor-pointer rounded border p-1 hover:bg-secondary hover:bg-opacity-30"
-      onClick={onClick}
-    >
-      {graphData != undefined && (
-        <div className="relative flex h-8 w-full">
-          {getHourBlocks().map((idx) => {
-            return (
-              <div
-                key={idx}
-                className={`h-2 flex-auto ${
-                  (graphData.motion.at(idx)?.y || 0) == 0
-                    ? ""
-                    : graphData.objects.includes(idx)
-                      ? "bg-object"
-                      : "bg-motion"
-                }`}
-              />
-            );
-          })}
-          <div className="absolute bottom-0 left-0 top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:00" : "%I:00%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[8.3%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:05" : "%I:05%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[16.7%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:10" : "%I:10%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[25%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:15" : "%I:15%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[33.3%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:20" : "%I:20%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[41.7%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:25" : "%I:25%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[50%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:30" : "%I:30%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[58.3%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:35" : "%I:35%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[66.7%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:40" : "%I:40%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[75%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:45" : "%I:45%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[83.3%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:50" : "%I:50%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-          <div className="absolute bottom-0 left-[91.7%] top-0 border-l border-gray-500 align-bottom">
-            <div className="absolute bottom-0 ml-1 text-sm text-gray-500">
-              {formatUnixTimestampToDateTime(startTime, {
-                strftime_fmt:
-                  config?.ui.time_format == "24hour" ? "%H:55" : "%I:55%P",
-                time_style: "medium",
-                date_style: "medium",
-              })}
-            </div>
-          </div>
-        </div>
-      )}
-      <div className="text-gray-500">
-        {formatUnixTimestampToDateTime(startTime, {
-          strftime_fmt:
-            config.ui.time_format == "24hour" ? "%m/%d %H:%M" : "%m/%d %I:%M%P",
-          time_style: "medium",
-          date_style: "medium",
-        })}
-      </div>
-    </div>
-  );
-}
-
-function getHourBlocks() {
-  const arr = [];
-
-  for (let x = 0; x <= 59; x++) {
-    arr.push(x);
-  }
-
-  return arr;
-}
@@ -1,7 +1,5 @@
-import { useState } from "react";
 import { Button } from "@/components/ui/button";
 import { toast } from "sonner";
-import ActivityIndicator from "../indicators/activity-indicator";
 import { FaDownload } from "react-icons/fa";
 import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
 import { cn } from "@/lib/utils";

@@ -19,8 +17,6 @@ export function DownloadVideoButton({
   startTime,
   className,
 }: DownloadVideoButtonProps) {
-  const [isDownloading, setIsDownloading] = useState(false);
-
   const formattedDate = formatUnixTimestampToDateTime(startTime, {
     strftime_fmt: "%D-%T",
     time_style: "medium",

@@ -29,7 +25,6 @@ export function DownloadVideoButton({
   const filename = `${camera}_${formattedDate}.mp4`;

   const handleDownloadStart = () => {
-    setIsDownloading(true);
     toast.success("Your review item video has started downloading.", {
       position: "top-center",
     });

@@ -39,19 +34,14 @@ export function DownloadVideoButton({
     <div className="flex justify-center">
       <Button
         asChild
-        disabled={isDownloading}
         className="flex items-center gap-2"
         size="sm"
         aria-label="Download Video"
       >
         <a href={source} download={filename} onClick={handleDownloadStart}>
-          {isDownloading ? (
-            <ActivityIndicator className="size-4" />
-          ) : (
-            <FaDownload
-              className={cn("size-4 text-secondary-foreground", className)}
-            />
-          )}
+          <FaDownload
+            className={cn("size-4 text-secondary-foreground", className)}
+          />
         </a>
       </Button>
     </div>
@@ -41,14 +41,6 @@ export default function SearchThumbnail({
       return searchResult.label;
     }

-    if (
-      config.model.attributes_map[searchResult.label].includes(
-        searchResult.sub_label,
-      )
-    ) {
-      return searchResult.sub_label;
-    }
-
     return `${searchResult.label}-verified`;
   }, [config, searchResult]);

@@ -102,7 +94,7 @@ export default function SearchThumbnail({
       </div>
       <TooltipPortal>
         <TooltipContent className="capitalize">
-          {[objectLabel]
+          {[searchResult.sub_label ?? objectLabel]
             .filter(
               (item) => item !== undefined && !item.includes("-verified"),
             )
@@ -13,6 +13,7 @@ type SearchThumbnailProps = {
   findSimilar: () => void;
   refreshResults: () => void;
   showObjectLifecycle: () => void;
+  showSnapshot: () => void;
 };

 export default function SearchThumbnailFooter({

@@ -21,6 +22,7 @@ export default function SearchThumbnailFooter({
   findSimilar,
   refreshResults,
   showObjectLifecycle,
+  showSnapshot,
 }: SearchThumbnailProps) {
   const { data: config } = useSWR<FrigateConfig>("config");

@@ -34,9 +36,8 @@ export default function SearchThumbnailFooter({
   return (
     <div
       className={cn(
-        "flex w-full flex-row items-center justify-between",
-        columns > 4 &&
-          "items-start sm:flex-col sm:gap-2 lg:flex-row lg:items-center lg:gap-1",
+        "flex w-full flex-row items-center justify-between gap-2",
+        columns > 4 && "items-start sm:flex-col lg:flex-row lg:items-center",
       )}
     >
       <div className="flex flex-col items-start text-xs text-primary-variant">

@@ -49,12 +50,13 @@ export default function SearchThumbnailFooter({
         )}
         {formattedDate}
       </div>
-      <div className="flex flex-row items-center justify-end gap-6 md:gap-4">
+      <div className="flex flex-row items-center justify-end gap-5 md:gap-4">
         <SearchResultActions
           searchResult={searchResult}
           findSimilar={findSimilar}
           refreshResults={refreshResults}
           showObjectLifecycle={showObjectLifecycle}
+          showSnapshot={showSnapshot}
         />
       </div>
     </div>
@@ -14,7 +14,7 @@ import MobileReviewSettingsDrawer, {
 } from "../overlay/MobileReviewSettingsDrawer";
 import useOptimisticState from "@/hooks/use-optimistic-state";
 import FilterSwitch from "./FilterSwitch";
-import { FilterList } from "@/types/filter";
+import { FilterList, GeneralFilter } from "@/types/filter";
 import CalendarFilterButton from "./CalendarFilterButton";
 import { CamerasFilterButton } from "./CamerasFilterButton";
 import PlatformAwareDialog from "../overlay/dialog/PlatformAwareDialog";

@@ -214,15 +214,9 @@ export default function ReviewFilterGroup({
           showAll={filter?.showAll == true}
           allZones={filterValues.zones}
           selectedZones={filter?.zones}
-          setShowAll={(showAll) => {
-            onUpdateFilter({ ...filter, showAll });
+          onUpdateFilter={(general) => {
+            onUpdateFilter({ ...filter, ...general });
           }}
-          updateLabelFilter={(newLabels) => {
-            onUpdateFilter({ ...filter, labels: newLabels });
-          }}
-          updateZoneFilter={(newZones) =>
-            onUpdateFilter({ ...filter, zones: newZones })
-          }
         />
       )}
       {isMobile && mobileSettingsFeatures.length > 0 && (

@@ -300,37 +294,40 @@ type GeneralFilterButtonProps = {
   showAll: boolean;
   allZones: string[];
   selectedZones?: string[];
-  setShowAll: (showAll: boolean) => void;
-  updateLabelFilter: (labels: string[] | undefined) => void;
-  updateZoneFilter: (zones: string[] | undefined) => void;
+  filter?: GeneralFilter;
+  onUpdateFilter: (filter: GeneralFilter) => void;
 };

 function GeneralFilterButton({
   allLabels,
   selectedLabels,
+  filter,
   currentSeverity,
   showAll,
   allZones,
   selectedZones,
-  setShowAll,
-  updateLabelFilter,
-  updateZoneFilter,
+  onUpdateFilter,
 }: GeneralFilterButtonProps) {
   const [open, setOpen] = useState(false);
-  const [currentLabels, setCurrentLabels] = useState<string[] | undefined>(
-    selectedLabels,
-  );
-  const [currentZones, setCurrentZones] = useState<string[] | undefined>(
-    selectedZones,
-  );
+  const [currentFilter, setCurrentFilter] = useState<GeneralFilter>({
+    labels: selectedLabels,
+    zones: selectedZones,
+    showAll: showAll,
+    ...filter,
+  });

-  // ui
+  // Update local state when props change

   useEffect(() => {
-    setCurrentLabels(selectedLabels);
-    setCurrentZones(selectedZones);
+    setCurrentFilter({
+      labels: selectedLabels,
+      zones: selectedZones,
+      showAll: showAll,
+      ...filter,
+    });
     // only refresh when state changes
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [selectedLabels, selectedZones]);
+  }, [selectedLabels, selectedZones, showAll, filter]);

   const trigger = (
     <Button

@@ -342,10 +339,18 @@ function GeneralFilterButton({
       aria-label="Filter"
     >
       <FaFilter
-        className={`${selectedLabels?.length || selectedZones?.length ? "text-selected-foreground" : "text-secondary-foreground"}`}
+        className={`${
+          selectedLabels?.length || selectedZones?.length
+            ? "text-selected-foreground"
+            : "text-secondary-foreground"
+        }`}
       />
       <div
-        className={`hidden md:block ${selectedLabels?.length || selectedZones?.length ? "text-selected-foreground" : "text-primary"}`}
+        className={`hidden md:block ${
+          selectedLabels?.length || selectedZones?.length
+            ? "text-selected-foreground"
+            : "text-primary"
+        }`}
       >
         Filter
       </div>

@@ -355,17 +360,26 @@ function GeneralFilterButton({
     <GeneralFilterContent
       allLabels={allLabels}
       selectedLabels={selectedLabels}
-      currentLabels={currentLabels}
       currentSeverity={currentSeverity}
-      showAll={showAll}
       allZones={allZones}
+      filter={currentFilter}
       selectedZones={selectedZones}
-      currentZones={currentZones}
-      setCurrentZones={setCurrentZones}
-      updateZoneFilter={updateZoneFilter}
-      setShowAll={setShowAll}
-      updateLabelFilter={updateLabelFilter}
-      setCurrentLabels={setCurrentLabels}
+      onUpdateFilter={setCurrentFilter}
+      onApply={() => {
+        if (currentFilter !== filter) {
+          onUpdateFilter(currentFilter);
+        }
+        setOpen(false);
+      }}
+      onReset={() => {
+        const resetFilter: GeneralFilter = {
+          labels: undefined,
+          zones: undefined,
+          showAll: false,
+        };
+        setCurrentFilter(resetFilter);
+        onUpdateFilter(resetFilter);
+      }}
       onClose={() => setOpen(false)}
     />
   );

@@ -377,7 +391,12 @@ function GeneralFilterButton({
       open={open}
       onOpenChange={(open) => {
         if (!open) {
-          setCurrentLabels(selectedLabels);
+          setCurrentFilter({
+            labels: selectedLabels,
+            zones: selectedZones,
+            showAll: showAll,
+            ...filter,
+          });
         }

         setOpen(open);

@@ -388,54 +407,50 @@ function GeneralFilterButton({

 type GeneralFilterContentProps = {
   allLabels: string[];
-  selectedLabels: string[] | undefined;
-  currentLabels: string[] | undefined;
+  allZones: string[];
   currentSeverity?: ReviewSeverity;
-  showAll?: boolean;
-  allZones?: string[];
+  filter: GeneralFilter;
+  selectedLabels?: string[];
   selectedZones?: string[];
-  currentZones?: string[];
-  setShowAll?: (showAll: boolean) => void;
-  updateLabelFilter: (labels: string[] | undefined) => void;
-  setCurrentLabels: (labels: string[] | undefined) => void;
-  updateZoneFilter?: (zones: string[] | undefined) => void;
-  setCurrentZones?: (zones: string[] | undefined) => void;
+  onUpdateFilter: (filter: GeneralFilter) => void;
+  onApply: () => void;
+  onReset: () => void;
   onClose: () => void;
 };
 export function GeneralFilterContent({
   allLabels,
-  selectedLabels,
-  currentLabels,
-  currentSeverity,
-  showAll,
   allZones,
-  selectedZones,
-  currentZones,
-  setShowAll,
-  updateLabelFilter,
-  setCurrentLabels,
-  updateZoneFilter,
-  setCurrentZones,
+  currentSeverity,
+  filter,
+  onUpdateFilter,
+  onApply,
+  onReset,
   onClose,
 }: GeneralFilterContentProps) {
   return (
     <>
       <div className="scrollbar-container h-auto max-h-[80dvh] overflow-y-auto overflow-x-hidden">
-        {currentSeverity && setShowAll && (
+        {currentSeverity && (
           <div className="my-2.5 flex flex-col gap-2.5">
             <FilterSwitch
               label="Alerts"
               disabled={currentSeverity == "alert"}
-              isChecked={currentSeverity == "alert" ? true : showAll == true}
-              onCheckedChange={setShowAll}
+              isChecked={
+                currentSeverity == "alert" ? true : filter.showAll === true
+              }
+              onCheckedChange={(checked) =>
+                onUpdateFilter({ ...filter, showAll: checked })
+              }
             />
             <FilterSwitch
               label="Detections"
               disabled={currentSeverity == "detection"}
               isChecked={
-                currentSeverity == "detection" ? true : showAll == true
+                currentSeverity == "detection" ? true : filter.showAll === true
               }
-              onCheckedChange={setShowAll}
+              onCheckedChange={(checked) =>
+                onUpdateFilter({ ...filter, showAll: checked })
+              }
             />
             <DropdownMenuSeparator />
           </div>

@@ -450,10 +465,11 @@ export function GeneralFilterContent({
         <Switch
           className="ml-1"
           id="allLabels"
-          checked={currentLabels == undefined}
+          checked={filter.labels === undefined}
          onCheckedChange={(isChecked) => {
            if (isChecked) {
-              setCurrentLabels(undefined);
+              const { labels: _labels, ...rest } = filter;
+              onUpdateFilter(rest);
            }
          }}
        />

@@ -463,20 +479,19 @@ export function GeneralFilterContent({
          <FilterSwitch
            key={item}
            label={item.replaceAll("_", " ")}
-            isChecked={currentLabels?.includes(item) ?? false}
+            isChecked={filter.labels?.includes(item) ?? false}
            onCheckedChange={(isChecked) => {
|
||||||
if (isChecked) {
|
if (isChecked) {
|
||||||
const updatedLabels = currentLabels ? [...currentLabels] : [];
|
const updatedLabels = filter.labels ? [...filter.labels] : [];
|
||||||
|
|
||||||
updatedLabels.push(item);
|
updatedLabels.push(item);
|
||||||
setCurrentLabels(updatedLabels);
|
onUpdateFilter({ ...filter, labels: updatedLabels });
|
||||||
} else {
|
} else {
|
||||||
const updatedLabels = currentLabels ? [...currentLabels] : [];
|
const updatedLabels = filter.labels ? [...filter.labels] : [];
|
||||||
|
|
||||||
// can not deselect the last item
|
// can not deselect the last item
|
||||||
if (updatedLabels.length > 1) {
|
if (updatedLabels.length > 1) {
|
||||||
updatedLabels.splice(updatedLabels.indexOf(item), 1);
|
updatedLabels.splice(updatedLabels.indexOf(item), 1);
|
||||||
setCurrentLabels(updatedLabels);
|
onUpdateFilter({ ...filter, labels: updatedLabels });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}}
|
}}
|
||||||
@@ -484,7 +499,7 @@ export function GeneralFilterContent({
|
|||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{allZones && setCurrentZones && (
|
{allZones && (
|
||||||
<>
|
<>
|
||||||
<DropdownMenuSeparator />
|
<DropdownMenuSeparator />
|
||||||
<div className="mb-5 mt-2.5 flex items-center justify-between">
|
<div className="mb-5 mt-2.5 flex items-center justify-between">
|
||||||
@@ -497,10 +512,11 @@ export function GeneralFilterContent({
|
|||||||
<Switch
|
<Switch
|
||||||
className="ml-1"
|
className="ml-1"
|
||||||
id="allZones"
|
id="allZones"
|
||||||
checked={currentZones == undefined}
|
checked={filter.zones === undefined}
|
||||||
onCheckedChange={(isChecked) => {
|
onCheckedChange={(isChecked) => {
|
||||||
if (isChecked) {
|
if (isChecked) {
|
||||||
setCurrentZones(undefined);
|
const { zones: _zones, ...rest } = filter;
|
||||||
|
onUpdateFilter(rest);
|
||||||
}
|
}
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
@@ -510,24 +526,24 @@ export function GeneralFilterContent({
|
|||||||
<FilterSwitch
|
<FilterSwitch
|
||||||
key={item}
|
key={item}
|
||||||
label={item.replaceAll("_", " ")}
|
label={item.replaceAll("_", " ")}
|
||||||
isChecked={currentZones?.includes(item) ?? false}
|
isChecked={filter.zones?.includes(item) ?? false}
|
||||||
onCheckedChange={(isChecked) => {
|
onCheckedChange={(isChecked) => {
|
||||||
if (isChecked) {
|
if (isChecked) {
|
||||||
const updatedZones = currentZones
|
const updatedZones = filter.zones
|
||||||
? [...currentZones]
|
? [...filter.zones]
|
||||||
: [];
|
: [];
|
||||||
|
|
||||||
updatedZones.push(item);
|
updatedZones.push(item);
|
||||||
setCurrentZones(updatedZones);
|
onUpdateFilter({ ...filter, zones: updatedZones });
|
||||||
} else {
|
} else {
|
||||||
const updatedZones = currentZones
|
const updatedZones = filter.zones
|
||||||
? [...currentZones]
|
? [...filter.zones]
|
||||||
: [];
|
: [];
|
||||||
|
|
||||||
// can not deselect the last item
|
// can not deselect the last item
|
||||||
if (updatedZones.length > 1) {
|
if (updatedZones.length > 1) {
|
||||||
updatedZones.splice(updatedZones.indexOf(item), 1);
|
updatedZones.splice(updatedZones.indexOf(item), 1);
|
||||||
setCurrentZones(updatedZones);
|
onUpdateFilter({ ...filter, zones: updatedZones });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}}
|
}}
|
||||||
@@ -543,27 +559,13 @@ export function GeneralFilterContent({
|
|||||||
aria-label="Apply"
|
aria-label="Apply"
|
||||||
variant="select"
|
variant="select"
|
||||||
onClick={() => {
|
onClick={() => {
|
||||||
if (selectedLabels != currentLabels) {
|
onApply();
|
||||||
updateLabelFilter(currentLabels);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (updateZoneFilter && selectedZones != currentZones) {
|
|
||||||
updateZoneFilter(currentZones);
|
|
||||||
}
|
|
||||||
|
|
||||||
onClose();
|
onClose();
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
Apply
|
Apply
|
||||||
</Button>
|
</Button>
|
||||||
<Button
|
<Button aria-label="Reset" onClick={onReset}>
|
||||||
aria-label="Reset"
|
|
||||||
onClick={() => {
|
|
||||||
setCurrentLabels(undefined);
|
|
||||||
setCurrentZones?.(undefined);
|
|
||||||
updateLabelFilter(undefined);
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
Reset
|
Reset
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
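The refactor above collapses the `current*` state props and per-field setters into a single `GeneralFilter` object plus `onUpdateFilter`, `onApply`, and `onReset` callbacks. A minimal sketch of the resulting parent-side wiring — the hook name and surrounding structure are illustrative; only the prop shapes come from the diff:

```ts
import { useState } from "react";

// Field names taken from the diff; all fields optional (undefined = "all").
type GeneralFilter = {
  labels?: string[];
  zones?: string[];
  showAll?: boolean;
};

// Hypothetical helper: the content component edits a draft through
// onUpdateFilter, and the parent commits or clears it explicitly.
function useGeneralFilterDraft(
  applied: GeneralFilter,
  apply: (filter: GeneralFilter) => void,
) {
  const [draft, setDraft] = useState<GeneralFilter>(applied);

  return {
    filter: draft,
    onUpdateFilter: setDraft,
    onApply: () => {
      // Commit only when the draft actually diverged from the applied filter.
      if (draft !== applied) apply(draft);
    },
    onReset: () => {
      const cleared: GeneralFilter = {};
      setDraft(cleared);
      apply(cleared);
    },
  };
}
```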
@@ -1,5 +1,6 @@
 import { useTheme } from "@/context/theme-provider";
 import { FrigateConfig } from "@/types/frigateConfig";
+import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
 import { useCallback, useEffect, useMemo } from "react";
 import Chart from "react-apexcharts";
 import { isMobileOnly } from "react-device-detect";
@@ -42,12 +43,14 @@ export function CameraLineGraph({

   const formatTime = useCallback(
     (val: unknown) => {
-      const date = new Date(updateTimes[Math.round(val as number)] * 1000);
-      return date.toLocaleTimeString([], {
-        hour12: config?.ui.time_format != "24hour",
-        hour: "2-digit",
-        minute: "2-digit",
-      });
+      return formatUnixTimestampToDateTime(
+        updateTimes[Math.round(val as number)],
+        {
+          timezone: config?.ui.timezone,
+          strftime_fmt:
+            config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p",
+        },
+      );
     },
     [config, updateTimes],
   );
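Both graph components now delegate tick formatting to `formatUnixTimestampToDateTime`, which keeps axis labels on the configured timezone instead of the browser locale. The call shape as a standalone sketch, with the signature assumed from its usage in this diff:

```ts
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";

// Assumed from usage above: unix seconds in, formatted string out, with an
// options object carrying the timezone and an strftime-style format.
function formatAxisTick(
  unixSeconds: number,
  use24Hour: boolean,
  timezone?: string,
): string {
  return formatUnixTimestampToDateTime(unixSeconds, {
    timezone,
    strftime_fmt: use24Hour ? "%H:%M" : "%I:%M %p",
  });
}
```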
@@ -1,6 +1,7 @@
 import { useTheme } from "@/context/theme-provider";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { Threshold } from "@/types/graph";
+import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
 import { useCallback, useEffect, useMemo } from "react";
 import Chart from "react-apexcharts";
 import { isMobileOnly } from "react-device-detect";
@@ -50,17 +51,17 @@ export function ThresholdBarGraph({

       let timeOffset = 0;
       if (dateIndex < 0) {
-        timeOffset = 5000 * Math.abs(dateIndex);
+        timeOffset = 5 * Math.abs(dateIndex);
       }

-      const date = new Date(
-        updateTimes[Math.max(1, dateIndex) - 1] * 1000 - timeOffset,
-      );
-      return date.toLocaleTimeString([], {
-        hour12: config?.ui.time_format != "24hour",
-        hour: "2-digit",
-        minute: "2-digit",
-      });
+      return formatUnixTimestampToDateTime(
+        updateTimes[Math.max(1, dateIndex) - 1] - timeOffset,
+        {
+          timezone: config?.ui.timezone,
+          strftime_fmt:
+            config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p",
+        },
+      );
     },
     [config, updateTimes],
   );
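Note the companion change from `5000` to `5`: the old code computed the offset in `Date` milliseconds, while `formatUnixTimestampToDateTime` takes unix seconds, so the same spacing between updates is now written directly in seconds. The offset logic in isolation:

```ts
// A negative dateIndex means the hovered bar predates the oldest recorded
// update; back-fill its timestamp by whole update intervals, in unix seconds
// (the old code used 5000 because it worked in milliseconds).
const UPDATE_INTERVAL_SECONDS = 5;

function backfillOffsetSeconds(dateIndex: number): number {
  return dateIndex < 0 ? UPDATE_INTERVAL_SECONDS * Math.abs(dateIndex) : 0;
}
```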
@@ -1,4 +1,10 @@
-import React, { useState, useRef, useEffect, useCallback } from "react";
+import React, {
+  useState,
+  useRef,
+  useEffect,
+  useCallback,
+  useMemo,
+} from "react";
 import {
   LuX,
   LuFilter,
@@ -88,6 +94,11 @@ export default function InputWithTags({
   const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false);
   const [searchToDelete, setSearchToDelete] = useState<string | null>(null);

+  const searchHistoryNames = useMemo(
+    () => searchHistory?.map((item) => item.name) ?? [],
+    [searchHistory],
+  );
+
   const handleSetSearchHistory = useCallback(() => {
     setIsSaveDialogOpen(true);
   }, []);
@@ -96,12 +107,8 @@ export default function InputWithTags({
     (name: string) => {
       if (searchHistoryLoaded) {
         setSearchHistory([
-          ...(searchHistory ?? []),
-          {
-            name: name,
-            search: search,
-            filter: filters,
-          },
+          ...(searchHistory ?? []).filter((item) => item.name !== name),
+          { name, search, filter: filters },
         ]);
       }
     },
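Besides the shorthand-property cleanup, the save handler above now deduplicates by name: filtering out any same-named entry before appending means re-saving overwrites instead of piling up duplicates. The same pattern in isolation (the types are illustrative stand-ins):

```ts
type SavedSearch = {
  name: string;
  search: string;
  filter?: Record<string, unknown>; // stand-in for the real filter type
};

// Replace-or-append: at most one entry per name survives.
function upsertSearch(
  history: SavedSearch[],
  entry: SavedSearch,
): SavedSearch[] {
  return [...history.filter((item) => item.name !== entry.name), entry];
}
```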
@@ -187,6 +194,11 @@ export default function InputWithTags({
         if (newFilters[filterType] === filterValue) {
           delete newFilters[filterType];
         }
+      } else if (filterType === "has_snapshot") {
+        if (newFilters[filterType] === filterValue) {
+          delete newFilters[filterType];
+          delete newFilters["is_submitted"];
+        }
       } else {
         delete newFilters[filterType];
       }
@@ -300,6 +312,10 @@ export default function InputWithTags({
           if (!newFilters.has_snapshot) newFilters.has_snapshot = undefined;
           newFilters.has_snapshot = value == "yes" ? 1 : 0;
           break;
+        case "is_submitted":
+          if (!newFilters.is_submitted) newFilters.is_submitted = undefined;
+          newFilters.is_submitted = value == "yes" ? 1 : 0;
+          break;
         case "has_clip":
           if (!newFilters.has_clip) newFilters.has_clip = undefined;
           newFilters.has_clip = value == "yes" ? 1 : 0;
@@ -349,7 +365,11 @@ export default function InputWithTags({
       }`;
     } else if (filterType === "min_score" || filterType === "max_score") {
       return Math.round(Number(filterValues) * 100).toString() + "%";
-    } else if (filterType === "has_clip" || filterType === "has_snapshot") {
+    } else if (
+      filterType === "has_clip" ||
+      filterType === "has_snapshot" ||
+      filterType === "is_submitted"
+    ) {
       return filterValues ? "Yes" : "No";
     } else {
       return filterValues as string;
@@ -456,9 +476,13 @@ export default function InputWithTags({
   }, [setFilters, resetSuggestions, setSearch, setInputFocused]);

   const handleClearSimilarity = useCallback(() => {
-    removeFilter("event_id", filters.event_id!);
-    removeFilter("search_type", "similarity");
-  }, [removeFilter, filters]);
+    const newFilters = { ...filters };
+    if (newFilters.event_id === filters.event_id) {
+      delete newFilters.event_id;
+    }
+    delete newFilters.search_type;
+    setFilters(newFilters);
+  }, [setFilters, filters]);

   const handleInputBlur = useCallback(
     (e: React.FocusEvent) => {
@@ -523,17 +547,29 @@ export default function InputWithTags({

   const handleInputKeyDown = useCallback(
     (e: React.KeyboardEvent<HTMLInputElement>) => {
+      const event = e.target as HTMLInputElement;
+
+      if (!currentFilterType && (e.key === "Home" || e.key === "End")) {
+        const position = e.key === "Home" ? 0 : event.value.length;
+        event.setSelectionRange(position, position);
+      }
+
       if (
         e.key === "Enter" &&
         inputValue.trim() !== "" &&
         filterSuggestions(suggestions).length == 0
       ) {
         e.preventDefault();

         handleSearch(inputValue);
       }
     },
-    [inputValue, handleSearch, filterSuggestions, suggestions],
+    [
+      inputValue,
+      handleSearch,
+      filterSuggestions,
+      suggestions,
+      currentFilterType,
+    ],
   );

   // effects
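The new Home/End branch restores plain caret movement inside the input, presumably because the surrounding command palette otherwise captures those keys; `setSelectionRange` is the standard DOM call for this. Stripped of the component context:

```ts
// Jump the caret to the start (Home) or end (End) of a text input.
function moveCaret(input: HTMLInputElement, key: "Home" | "End"): void {
  const position = key === "Home" ? 0 : input.value.length;
  input.setSelectionRange(position, position);
}
```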
@@ -744,13 +780,17 @@ export default function InputWithTags({
                   </button>
                 </span>
               ))
-            : filterType !== "event_id" && (
+            : !(filterType == "event_id" && isSimilaritySearch) && (
                 <span
                   key={filterType}
                   className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
                 >
-                  {filterType.replaceAll("_", " ")}:{" "}
-                  {formatFilterValues(filterType, filterValues)}
+                  {filterType === "event_id"
+                    ? "Tracked Object ID"
+                    : filterType === "is_submitted"
+                      ? "Submitted to Frigate+"
+                      : filterType.replaceAll("_", " ")}
+                  : {formatFilterValues(filterType, filterValues)}
                   <button
                     onClick={() =>
                       removeFilter(
@@ -823,6 +863,7 @@ export default function InputWithTags({
         </CommandList>
       </Command>
       <SaveSearchDialog
+        existingNames={searchHistoryNames}
         isOpen={isSaveDialogOpen}
         onClose={() => setIsSaveDialogOpen(false)}
         onSave={handleSaveSearch}
@@ -9,17 +9,19 @@ import {

 import { Button } from "@/components/ui/button";
 import { Input } from "@/components/ui/input";
-import { useState } from "react";
+import { useMemo, useState } from "react";
 import { isMobile } from "react-device-detect";
 import { toast } from "sonner";

 type SaveSearchDialogProps = {
+  existingNames: string[];
   isOpen: boolean;
   onClose: () => void;
   onSave: (name: string) => void;
 };

 export function SaveSearchDialog({
+  existingNames,
   isOpen,
   onClose,
   onSave,
@@ -37,6 +39,11 @@ export function SaveSearchDialog({
     }
   };

+  const overwrite = useMemo(
+    () => existingNames.includes(searchName),
+    [existingNames, searchName],
+  );
+
   return (
     <Dialog open={isOpen} onOpenChange={onClose}>
       <DialogContent
@@ -58,6 +65,12 @@ export function SaveSearchDialog({
           onChange={(e) => setSearchName(e.target.value)}
           placeholder="Enter a name for your search"
         />
+        {overwrite && (
+          <div className="ml-1 text-sm text-danger">
+            {searchName} already exists. Saving will overwrite the existing
+            value.
+          </div>
+        )}
         <DialogFooter>
           <Button aria-label="Cancel" onClick={onClose}>
             Cancel
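The overwrite warning hinges on a memoized membership test against the names already in history, recomputed only when the list or the typed name changes. As a standalone hook — a hypothetical extraction, not code from the PR:

```ts
import { useMemo } from "react";

// True when saving under searchName would replace an existing entry.
function useIsOverwrite(existingNames: string[], searchName: string): boolean {
  return useMemo(
    () => existingNames.includes(searchName),
    [existingNames, searchName],
  );
}
```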
@@ -24,7 +24,7 @@ import {
   DropdownMenuSubTrigger,
   DropdownMenuTrigger,
 } from "../ui/dropdown-menu";
-import { Button } from "../ui/button";
 import { Link } from "react-router-dom";
 import { CgDarkMode } from "react-icons/cg";
 import {
@@ -33,30 +33,15 @@
   useTheme,
 } from "@/context/theme-provider";
 import { IoColorPalette } from "react-icons/io5";
-import {
-  AlertDialog,
-  AlertDialogAction,
-  AlertDialogCancel,
-  AlertDialogContent,
-  AlertDialogFooter,
-  AlertDialogHeader,
-  AlertDialogTitle,
-} from "../ui/alert-dialog";
-import { useEffect, useState } from "react";
+import { useState } from "react";
 import { useRestart } from "@/api/ws";
-import {
-  Sheet,
-  SheetContent,
-  SheetDescription,
-  SheetHeader,
-  SheetTitle,
-} from "../ui/sheet";
 import {
   Tooltip,
   TooltipContent,
   TooltipTrigger,
 } from "@/components/ui/tooltip";
-import ActivityIndicator from "../indicators/activity-indicator";
 import { isDesktop, isMobile } from "react-device-detect";
 import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
 import {
@@ -68,8 +53,8 @@
 } from "../ui/dialog";
 import { TooltipPortal } from "@radix-ui/react-tooltip";
 import { cn } from "@/lib/utils";
-import { baseUrl } from "@/api/baseUrl";
 import useSWR from "swr";
+import RestartDialog from "../overlay/dialog/RestartDialog";

 type GeneralSettingsProps = {
   className?: string;
@@ -83,35 +68,8 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {

   const { theme, colorScheme, setTheme, setColorScheme } = useTheme();
   const [restartDialogOpen, setRestartDialogOpen] = useState(false);
-  const [restartingSheetOpen, setRestartingSheetOpen] = useState(false);
-  const [countdown, setCountdown] = useState(60);

   const { send: sendRestart } = useRestart();

-  useEffect(() => {
-    let countdownInterval: NodeJS.Timeout;
-
-    if (restartingSheetOpen) {
-      countdownInterval = setInterval(() => {
-        setCountdown((prevCountdown) => prevCountdown - 1);
-      }, 1000);
-    }
-
-    return () => {
-      clearInterval(countdownInterval);
-    };
-  }, [restartingSheetOpen]);
-
-  useEffect(() => {
-    if (countdown === 0) {
-      window.location.href = baseUrl;
-    }
-  }, [countdown]);
-
-  const handleForceReload = () => {
-    window.location.href = baseUrl;
-  };
-
   const Container = isDesktop ? DropdownMenu : Drawer;
   const Trigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
   const Content = isDesktop ? DropdownMenuContent : DrawerContent;
@@ -413,64 +371,11 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
         </div>
       </Content>
     </Container>
-    {restartDialogOpen && (
-      <AlertDialog
-        open={restartDialogOpen}
-        onOpenChange={() => setRestartDialogOpen(false)}
-      >
-        <AlertDialogContent>
-          <AlertDialogHeader>
-            <AlertDialogTitle>
-              Are you sure you want to restart Frigate?
-            </AlertDialogTitle>
-          </AlertDialogHeader>
-          <AlertDialogFooter>
-            <AlertDialogCancel>Cancel</AlertDialogCancel>
-            <AlertDialogAction
-              onClick={() => {
-                setRestartingSheetOpen(true);
-                sendRestart("restart");
-              }}
-            >
-              Restart
-            </AlertDialogAction>
-          </AlertDialogFooter>
-        </AlertDialogContent>
-      </AlertDialog>
-    )}
-    {restartingSheetOpen && (
-      <>
-        <Sheet
-          open={restartingSheetOpen}
-          onOpenChange={() => setRestartingSheetOpen(false)}
-        >
-          <SheetContent
-            side="top"
-            onInteractOutside={(e) => e.preventDefault()}
-          >
-            <div className="flex flex-col items-center">
-              <ActivityIndicator />
-              <SheetHeader className="mt-5 text-center">
-                <SheetTitle className="text-center">
-                  Frigate is Restarting
-                </SheetTitle>
-                <SheetDescription className="text-center">
-                  <p>This page will reload in {countdown} seconds.</p>
-                </SheetDescription>
-              </SheetHeader>
-              <Button
-                size="lg"
-                className="mt-5"
-                aria-label="Force reload now"
-                onClick={handleForceReload}
-              >
-                Force Reload Now
-              </Button>
-            </div>
-          </SheetContent>
-        </Sheet>
-      </>
-    )}
+    <RestartDialog
+      isOpen={restartDialogOpen}
+      onClose={() => setRestartDialogOpen(false)}
+      onRestart={() => sendRestart("restart")}
+    />
     </>
   );
 }
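The inline AlertDialog-plus-Sheet restart flow moves into a shared `RestartDialog` overlay; the countdown and force-reload logic presumably move with it. Its prop contract, inferred from the call site above (the component itself is not shown in this diff):

```ts
// Inferred from usage: <RestartDialog isOpen={...} onClose={...} onRestart={...} />
type RestartDialogProps = {
  isOpen: boolean;
  onClose: () => void;
  onRestart: () => void; // e.g. () => sendRestart("restart")
};
```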
@@ -37,15 +37,14 @@ import {
   TooltipContent,
   TooltipTrigger,
 } from "@/components/ui/tooltip";
-import { FrigatePlusDialog } from "@/components/overlay/dialog/FrigatePlusDialog";
 import useSWR from "swr";
-import { Event } from "@/types/event";

 type SearchResultActionsProps = {
   searchResult: SearchResult;
   findSimilar: () => void;
   refreshResults: () => void;
   showObjectLifecycle: () => void;
+  showSnapshot: () => void;
   isContextMenu?: boolean;
   children?: ReactNode;
 };
@@ -55,12 +54,12 @@ export default function SearchResultActions({
   findSimilar,
   refreshResults,
   showObjectLifecycle,
+  showSnapshot,
   isContextMenu = false,
   children,
 }: SearchResultActionsProps) {
   const { data: config } = useSWR<FrigateConfig>("config");

-  const [showFrigatePlus, setShowFrigatePlus] = useState(false);
   const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);

   const handleDelete = () => {
@@ -130,10 +129,7 @@ export default function SearchResultActions({
       searchResult.has_snapshot &&
       searchResult.end_time &&
       !searchResult.plus_id && (
-        <MenuItem
-          aria-label="Submit to Frigate Plus"
-          onClick={() => setShowFrigatePlus(true)}
-        >
+        <MenuItem aria-label="Submit to Frigate Plus" onClick={showSnapshot}>
           <FrigatePlusIcon className="mr-2 size-4 cursor-pointer text-primary" />
           <span>Submit to Frigate+</span>
         </MenuItem>
@@ -159,7 +155,13 @@ export default function SearchResultActions({
           <AlertDialogTitle>Confirm Delete</AlertDialogTitle>
         </AlertDialogHeader>
         <AlertDialogDescription>
-          Are you sure you want to delete this tracked object?
+          Deleting this tracked object removes the snapshot, any saved
+          embeddings, and any associated object lifecycle entries. Recorded
+          footage of this tracked object in History view will <em>NOT</em> be
+          deleted.
+          <br />
+          <br />
+          Are you sure you want to proceed?
         </AlertDialogDescription>
         <AlertDialogFooter>
           <AlertDialogCancel>Cancel</AlertDialogCancel>
@@ -172,16 +174,6 @@ export default function SearchResultActions({
         </AlertDialogFooter>
       </AlertDialogContent>
     </AlertDialog>
-    <FrigatePlusDialog
-      upload={
-        showFrigatePlus ? (searchResult as unknown as Event) : undefined
-      }
-      onClose={() => setShowFrigatePlus(false)}
-      onEventUploaded={() => {
-        searchResult.plus_id = "submitted";
-      }}
-    />
-
     {isContextMenu ? (
       <ContextMenu>
         <ContextMenuTrigger>{children}</ContextMenuTrigger>
@@ -210,7 +202,7 @@ export default function SearchResultActions({
           <TooltipTrigger>
             <FrigatePlusIcon
               className="size-5 cursor-pointer text-primary-variant hover:text-primary"
-              onClick={() => setShowFrigatePlus(true)}
+              onClick={showSnapshot}
             />
           </TooltipTrigger>
           <TooltipContent>Submit to Frigate+</TooltipContent>
@@ -10,6 +10,7 @@ import ActivityIndicator from "../indicators/activity-indicator";
 import { GpuInfo, Nvinfo, Vainfo } from "@/types/stats";
 import { Button } from "../ui/button";
 import copy from "copy-to-clipboard";
+import { toast } from "sonner";

 type GPUInfoDialogProps = {
   showGpuInfo: boolean;
@@ -30,12 +31,11 @@ export default function GPUInfoDialog({

   const onCopyInfo = async () => {
     copy(
-      JSON.stringify(gpuType == "vainfo" ? vainfo : nvinfo).replace(
-        /[\\\s]+/gi,
-        "",
-      ),
+      JSON.stringify(gpuType == "vainfo" ? vainfo : nvinfo)
+        .replace(/\\t/g, "\t")
+        .replace(/\\n/g, "\n"),
     );
-    setShowGpuInfo(false);
+    toast.success("Copied GPU info to clipboard.");
   };

   if (gpuType == "vainfo") {
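The copy handler previously stripped every backslash and whitespace character from the JSON, which flattened the output; it now turns the JSON-escaped `\t`/`\n` sequences back into real tabs and newlines and confirms with a toast instead of closing the dialog. The string transform on its own:

```ts
// JSON.stringify encodes tabs and newlines as the two-character sequences
// "\t" and "\n"; convert them back so the copied text keeps its layout.
function unescapeWhitespace(json: string): string {
  return json.replace(/\\t/g, "\t").replace(/\\n/g, "\n");
}
```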
@@ -4,7 +4,7 @@ import { Button } from "../ui/button";
 import { FaArrowDown, FaCalendarAlt, FaCog, FaFilter } from "react-icons/fa";
 import { TimeRange } from "@/types/timeline";
 import { ExportContent, ExportPreviewDialog } from "./ExportDialog";
-import { ExportMode } from "@/types/filter";
+import { ExportMode, GeneralFilter } from "@/types/filter";
 import ReviewActivityCalendar from "./ReviewActivityCalendar";
 import { SelectSeparator } from "../ui/select";
 import { ReviewFilter, ReviewSeverity, ReviewSummary } from "@/types/review";
@@ -114,12 +114,12 @@ export default function MobileReviewSettingsDrawer({

   // filters

-  const [currentLabels, setCurrentLabels] = useState<string[] | undefined>(
-    filter?.labels,
-  );
-  const [currentZones, setCurrentZones] = useState<string[] | undefined>(
-    filter?.zones,
-  );
+  const [currentFilter, setCurrentFilter] = useState<GeneralFilter>({
+    labels: filter?.labels,
+    zones: filter?.zones,
+    showAll: filter?.showAll,
+    ...filter,
+  });

   if (!isMobile) {
     return;
@@ -260,23 +260,21 @@ export default function MobileReviewSettingsDrawer({
           <GeneralFilterContent
             allLabels={allLabels}
             selectedLabels={filter?.labels}
-            currentLabels={currentLabels}
             currentSeverity={currentSeverity}
-            showAll={filter?.showAll == true}
             allZones={allZones}
+            filter={currentFilter}
             selectedZones={filter?.zones}
-            currentZones={currentZones}
-            setCurrentZones={setCurrentZones}
-            updateZoneFilter={(newZones) =>
-              onUpdateFilter({ ...filter, zones: newZones })
-            }
-            setShowAll={(showAll) => {
-              onUpdateFilter({ ...filter, showAll });
-            }}
-            setCurrentLabels={setCurrentLabels}
-            updateLabelFilter={(newLabels) =>
-              onUpdateFilter({ ...filter, labels: newLabels })
-            }
+            onUpdateFilter={setCurrentFilter}
+            onApply={() => {
+              if (currentFilter !== filter) {
+                onUpdateFilter(currentFilter);
+              }
+            }}
+            onReset={() => {
+              const resetFilter: GeneralFilter = {};
+              setCurrentFilter(resetFilter);
+              onUpdateFilter(resetFilter);
+            }}
             onClose={() => setDrawerMode("select")}
           />
         </div>
@@ -111,13 +111,13 @@ export function AnnotationSettingsPane({
   function onApply(values: z.infer<typeof formSchema>) {
     if (
       !values ||
-      values.annotationOffset == null ||
-      values.annotationOffset == "" ||
+      values.annotationOffset === null ||
+      values.annotationOffset === "" ||
       !config
     ) {
       return;
     }
-    setAnnotationOffset(values.annotationOffset);
+    setAnnotationOffset(values.annotationOffset ?? 0);
   }

   return (
@@ -203,6 +203,20 @@ export default function ObjectLifecycle({
     setCurrent(index);
   };

+  const handleThumbnailNavigation = useCallback(
+    (direction: "next" | "previous") => {
+      if (!mainApi || !thumbnailApi || !eventSequence) return;
+      const newIndex =
+        direction === "next"
+          ? Math.min(current + 1, eventSequence.length - 1)
+          : Math.max(current - 1, 0);
+      mainApi.scrollTo(newIndex);
+      thumbnailApi.scrollTo(newIndex);
+      setCurrent(newIndex);
+    },
+    [mainApi, thumbnailApi, current, eventSequence],
+  );
+
   useEffect(() => {
     if (eventSequence && eventSequence.length > 0) {
       setTimeIndex(eventSequence?.[current].timestamp);
@@ -413,6 +427,7 @@ export default function ObjectLifecycle({
               </div>
               <div className="text-sm text-primary-variant">
                 {formatUnixTimestampToDateTime(item.timestamp, {
+                  timezone: config.ui.timezone,
                   strftime_fmt:
                     config.ui.time_format == "24hour"
                       ? "%d %b %H:%M:%S"
@@ -495,7 +510,7 @@ export default function ObjectLifecycle({
             containScroll: "keepSnaps",
             dragFree: true,
           }}
-          className="w-full max-w-[72%] md:max-w-[85%]"
+          className="max-w-[72%] md:max-w-[85%]"
           setApi={setThumbnailApi}
         >
          <CarouselContent
@@ -507,10 +522,7 @@ export default function ObjectLifecycle({
           {eventSequence.map((item, index) => (
             <CarouselItem
               key={index}
-              className={cn(
-                "basis-1/4 cursor-pointer pl-1 md:basis-[10%]",
-                fullscreen && "md:basis-16",
-              )}
+              className={cn("basis-auto cursor-pointer pl-1")}
               onClick={() => handleThumbnailClick(index)}
             >
               <div className="p-1">
@@ -545,8 +557,14 @@ export default function ObjectLifecycle({
             </CarouselItem>
           ))}
         </CarouselContent>
-        <CarouselPrevious />
-        <CarouselNext />
+        <CarouselPrevious
+          disabled={current === 0}
+          onClick={() => handleThumbnailNavigation("previous")}
+        />
+        <CarouselNext
+          disabled={current === eventSequence.length - 1}
+          onClick={() => handleThumbnailNavigation("next")}
+        />
       </Carousel>
     </div>
   </div>
@@ -621,7 +639,7 @@ function getLifecycleItemDescription(lifecycleItem: ObjectLifecycleSequence) {
     )} detected for ${label}`;
   } else {
     title = `${
-      lifecycleItem.data.sub_label
+      lifecycleItem.data.label
     } recognized as ${lifecycleItem.data.attribute.replaceAll("_", " ")}`;
   }
   return title;
Some files were not shown because too many files have changed in this diff.