Compare commits

...

11 Commits

Author SHA1 Message Date
dependabot[bot]
727bddac03 Bump docker/bake-action from 3 to 6
Bumps [docker/bake-action](https://github.com/docker/bake-action) from 3 to 6.
- [Release notes](https://github.com/docker/bake-action/releases)
- [Commits](https://github.com/docker/bake-action/compare/v3...v6)

---
updated-dependencies:
- dependency-name: docker/bake-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-08 11:13:17 +00:00
Nicolas Mowen
d57a61b50f Simplify model config (#15881)
* Add migration to migrate to model_path

* Simplify model config

* Cleanup docs

* Set config version

* Formatting

* Fix tests
2025-01-07 20:59:37 -07:00
Nicolas Mowen
4fc9106c17 Update for correct audio requirements (#15882) 2025-01-07 17:02:32 -06:00
Nicolas Mowen
38e098ca31 Remove extra data except from keypackets when using qsv (#15865) 2025-01-06 17:38:46 -06:00
Nicolas Mowen
e7ad38d827 Update model docs (#15779) 2025-01-02 10:04:16 -06:00
Josh Hawkins
a1ce9aacf2 Tracked object details pane bugfix (#15736)
* restore save button in tracked object details pane

* conditionally show save button
2024-12-30 08:23:25 -06:00
Nicolas Mowen
322b847356 Fix event cleanup (#15724) 2024-12-29 14:47:40 -06:00
Josh Hawkins
98338e4c7f Ensure object lifecycle ratio is re-normalized to camera aspect (#15717) 2024-12-28 13:37:39 -07:00
Josh Hawkins
171a89f37b Language consistency - use Explore instead of Search (#15709) 2024-12-27 17:38:43 -07:00
Josh Hawkins
8114b541a8 Sort camera group edit screen by ui config values (#15705) 2024-12-27 14:30:27 -06:00
Josh Hawkins
c48396c5c6 Fix crash when streams are undefined in go2rtc config password cleaning (#15695) 2024-12-27 08:36:21 -06:00
19 changed files with 140 additions and 108 deletions

View File

@@ -66,7 +66,7 @@ jobs:
${{ steps.setup.outputs.image-name }}-standard-arm64
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
- name: Build and push RPi build
-uses: docker/bake-action@v4
+uses: docker/bake-action@v6
with:
push: true
targets: rpi
@@ -94,7 +94,7 @@ jobs:
BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-uses: docker/bake-action@v4
+uses: docker/bake-action@v6
with:
push: true
targets: tensorrt
@@ -122,7 +122,7 @@ jobs:
BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-uses: docker/bake-action@v4
+uses: docker/bake-action@v6
with:
push: true
targets: tensorrt
@@ -149,7 +149,7 @@ jobs:
- name: Build and push TensorRT (x86 GPU)
env:
COMPUTE_LEVEL: "50 60 70 80 90"
-uses: docker/bake-action@v4
+uses: docker/bake-action@v6
with:
push: true
targets: tensorrt
@@ -174,7 +174,7 @@ jobs:
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Rockchip build
-uses: docker/bake-action@v3
+uses: docker/bake-action@v6
with:
push: true
targets: rk
@@ -199,7 +199,7 @@ jobs:
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Hailo-8l build
-uses: docker/bake-action@v4
+uses: docker/bake-action@v6
with:
push: true
targets: h8l
@@ -212,7 +212,7 @@ jobs:
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
-uses: docker/bake-action@v3
+uses: docker/bake-action@v6
with:
push: true
targets: rocm

View File

@@ -203,14 +203,13 @@ detectors:
ov:
type: openvino
device: AUTO
-model:
-path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
+path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:

View File

@@ -29,7 +29,7 @@ The default video and audio codec on your camera may not always be compatible wi
### Audio Support
-MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
+MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
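  # The hunk is truncated here. As a rough sketch (not part of the diff), a
  # restream that satisfies both players could re-encode audio to AAC and Opus
  # with go2rtc's ffmpeg source; the stream name "back" and the RTSP URL are
  # placeholders.
  streams:
    back:
      - rtsp://user:password@camera_ip:554/stream # original stream
      - "ffmpeg:back#audio=aac#audio=opus" # add AAC (MSE) and Opus (WebRTC) tracks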

View File

@@ -144,7 +144,9 @@ detectors:
#### SSDLite MobileNet v2
-An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
+An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
+Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
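  # The hunk is cut off above; filling in the rest as a sketch based on the
  # restructured config shown in the first documentation hunk of this compare:
  ov:
    type: openvino
    device: AUTO

model:
  width: 300
  height: 300
  input_tensor: nhwc
  input_pixel_format: bgr
  path: /openvino-model/ssdlite_mobilenet_v2.xml
  labelmap_path: /openvino-model/coco_91cl_bkgr.txt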
@@ -254,6 +256,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
+yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@@ -282,6 +285,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated.
+Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
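    # The hunk is cut off above; a sketch of the remainder, assuming a generated
    # yolov7-320 model (substitute the path and dimensions of the model you built):
    type: tensorrt
    device: 0 # GPU index

model:
  path: /config/model_cache/tensorrt/yolov7-320.trt
  labelmap_path: /labelmap/coco-80.txt
  input_tensor: nchw
  input_pixel_format: rgb
  width: 320
  height: 320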
@@ -501,11 +506,12 @@ detectors:
cpu1:
type: cpu
num_threads: 3
-model:
-path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
-model:
-path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
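The per-detector `model` blocks removed above no longer apply after this change; as a sketch, the same two-detector example would now set the shared path once at the top level:

```yaml
detectors:
  cpu1:
    type: cpu
    num_threads: 3
  cpu2:
    type: cpu
    num_threads: 3

model:
  path: "/custom_model.tflite"
```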
@@ -632,8 +638,6 @@ detectors:
hailo8l:
type: hailo8l
device: PCIe
-model:
-path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
model:
width: 300
@@ -641,4 +645,5 @@ model:
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
+path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```

View File

@@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
-# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
+# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@@ -117,25 +117,27 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
+# Other detectors will require the model config to be set.
model:
-# Optional: path to the model (default: automatic based on detector)
+# Required: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
-# Optional: path to the labelmap (default: shown below)
+# Required: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
-# Optional: Object detection model input colorspace
+# Required: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
-# Optional: Object detection model input tensor format
+# Required: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
-# Optional: Object detection model type, currently only used with the OpenVINO detector
+# Required: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
-# Optional: Label name modifications. These are merged into the standard labelmap.
+# Required: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)

View File

@@ -139,6 +139,8 @@ def config(request: Request):
mode="json", warnings="none", exclude_none=True
)
for stream_name, stream in go2rtc.get("streams", {}).items():
+if stream is None:
+continue
if isinstance(stream, str):
cleaned = clean_camera_user_pass(stream)
else:
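For context (an inference from the added guard, not shown in the diff): a go2rtc stream key declared without a value parses to `None` rather than a string or list, for example:

```yaml
go2rtc:
  streams:
    front_door: # no value -> parsed as None, previously crashed password cleaning
```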

View File

@@ -594,35 +594,27 @@ class FrigateConfig(FrigateBaseModel):
if isinstance(detector, dict)
else detector.model_dump(warnings="none")
)
-detector_config: DetectorConfig = adapter.validate_python(model_dict)
-if detector_config.model is None:
-detector_config.model = self.model.model_copy()
-else:
-path = detector_config.model.path
-detector_config.model = self.model.model_copy()
-detector_config.model.path = path
+detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+if "path" not in model_dict or len(model_dict.keys()) > 1:
+logger.warning(
+"Customizing more than a detector model path is unsupported."
+)
+# users should not set model themselves
+if detector_config.model:
+detector_config.model = None
-merged_model = deep_merge(
-detector_config.model.model_dump(exclude_unset=True, warnings="none"),
-self.model.model_dump(exclude_unset=True, warnings="none"),
-)
+model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-if "path" not in merged_model:
+if detector_config.model_path:
+model_config["path"] = detector_config.model_path
+if "path" not in model_config:
if detector_config.type == "cpu":
-merged_model["path"] = "/cpu_model.tflite"
+model_config["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
-merged_model["path"] = "/edgetpu_model.tflite"
+model_config["path"] = "/edgetpu_model.tflite"
-detector_config.model = ModelConfig.model_validate(merged_model)
-detector_config.model.check_and_load_plus_model(
-self.plus_api, detector_config.type
-)
-detector_config.model.compute_model_hash()
+model = ModelConfig.model_validate(model_config)
+model.check_and_load_plus_model(self.plus_api, detector_config.type)
+model.compute_model_hash()
+detector_config.model = model
self.detectors[key] = detector_config
return self
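The net effect, as a sketch of the accepted config shape (values illustrative): the top-level `model` section is now the single source of model settings, and a detector may only override the path via `model_path`:

```yaml
detectors:
  coral:
    type: edgetpu
    model_path: /edgetpu_model.tflite # only the path may vary per detector

model:
  width: 320
  height: 320
  input_tensor: nhwc
  input_pixel_format: rgb
```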

View File

@@ -194,6 +194,9 @@ class BaseDetectorConfig(BaseModel):
model: Optional[ModelConfig] = Field(
default=None, title="Detector specific model configuration."
)
+model_path: Optional[str] = Field(
+default=None, title="Detector specific model path."
+)
model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
)

View File

@@ -121,8 +121,8 @@ class EventCleanup(threading.Thread):
events_to_update = []
-for batch in query.iterator():
-events_to_update.extend([event.id for event in batch])
+for event in query.iterator():
+events_to_update.append(event.id)
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(
f"Updating {update_params} for {len(events_to_update)} events"
@@ -257,7 +257,7 @@ class EventCleanup(threading.Thread):
events_to_update = []
for event in query.iterator():
-events_to_update.append(event)
+events_to_update.append(event.id)
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(

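Both hunks restore the same chunked-update pattern: collect plain event ids and flush them in batches. A sketch of that pattern, assuming peewee's `Event` model and the module's `CHUNK_SIZE` and `update_params` (the update call itself is illustrative):

```python
events_to_update = []

for event in query.iterator():
    events_to_update.append(event.id)  # collect ids, not model instances
    if len(events_to_update) >= CHUNK_SIZE:
        # flush one chunk of ids; "<<" is peewee's IN operator
        Event.update(update_params).where(Event.id << events_to_update).execute()
        events_to_update = []

# flush whatever is left after the loop
if events_to_update:
    Event.update(update_params).where(Event.id << events_to_update).execute()
```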
View File

@@ -71,8 +71,8 @@ PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv -bsf:v dump_extra", # https://trac.ffmpeg.org/ticket/9766#comment:17
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",

View File

@@ -75,11 +75,11 @@ class TestConfig(unittest.TestCase):
"detectors": {
"cpu": {
"type": "cpu",
"model": {"path": "/cpu_model.tflite"},
"model_path": "/cpu_model.tflite",
},
"edgetpu": {
"type": "edgetpu",
"model": {"path": "/edgetpu_model.tflite"},
"model_path": "/edgetpu_model.tflite",
},
"openvino": {
"type": "openvino",

View File

@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__)
CURRENT_CONFIG_VERSION = "0.15-0"
CURRENT_CONFIG_VERSION = "0.15-1"
DEFAULT_CONFIG_FILE = "/config/config.yml"
@@ -77,6 +77,13 @@ def migrate_frigate_config(config_file: str):
yaml.dump(new_config, f)
previous_version = "0.15-0"
if previous_version < "0.15-1":
logger.info(f"Migrating frigate config from {previous_version} to 0.15-1...")
new_config = migrate_015_1(config)
with open(config_file, "w") as f:
yaml.dump(new_config, f)
previous_version = "0.15-1"
logger.info("Finished frigate config migration...")
@@ -267,6 +274,21 @@ def migrate_015_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
return new_config
+def migrate_015_1(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:
+"""Handle migrating frigate config to 0.15-1"""
+new_config = config.copy()
+for detector, detector_config in config.get("detectors", {}).items():
+path = detector_config.get("model", {}).get("path")
+if path:
+new_config["detectors"][detector]["model_path"] = path
+del new_config["detectors"][detector]["model"]
+new_config["version"] = "0.15-1"
+return new_config
def get_relative_coordinates(
mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
) -> Union[str, list]:
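Concretely, `migrate_015_1` moves any detector-level `model.path` to the new flat `model_path` key; a before/after sketch with an illustrative detector name:

```yaml
# 0.15-0
detectors:
  coral:
    type: edgetpu
    model:
      path: /edgetpu_model.tflite

# 0.15-1
detectors:
  coral:
    type: edgetpu
    model_path: /edgetpu_model.tflite
```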

View File

@@ -755,7 +755,11 @@ export function CameraGroupEdit({
<FormMessage />
{[
...(birdseyeConfig?.enabled ? ["birdseye"] : []),
-...Object.keys(config?.cameras ?? {}),
+...Object.keys(config?.cameras ?? {}).sort(
+(a, b) =>
+(config?.cameras[a]?.ui?.order ?? 0) -
+(config?.cameras[b]?.ui?.order ?? 0),
+),
].map((camera) => (
<FormControl key={camera}>
<FilterSwitch
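The sort key is each camera's `ui.order` value (defaulting to 0), so the camera group edit screen now follows the ordering set in the config, for example (camera names illustrative):

```yaml
cameras:
  driveway:
    ui:
      order: 1
  back_yard:
    ui:
      order: 2
```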

View File

@@ -477,7 +477,10 @@ export default function ObjectLifecycle({
</p>
{Array.isArray(item.data.box) &&
item.data.box.length >= 4
-? (item.data.box[2] / item.data.box[3]).toFixed(2)
+? (
+aspectRatio *
+(item.data.box[2] / item.data.box[3])
+).toFixed(2)
: "N/A"}
</div>
</div>
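The reasoning behind the fix, as a sketch (the box format is an assumption from the surrounding code): box values are normalized to the frame, so the raw width/height quotient must be scaled by the camera's aspect ratio to reflect the object's true proportions.

```typescript
// box = [x, y, width, height], all normalized to the frame (0-1)
// cameraAspect = detect width / detect height
function objectAspect(box: number[], cameraAspect: number): string {
  return (cameraAspect * (box[2] / box[3])).toFixed(2);
}
```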

View File

@@ -505,45 +505,46 @@ function ObjectDetailsTab({
<div className="flex w-full flex-row justify-end gap-2">
{config?.cameras[search.camera].genai.enabled && search.end_time && (
<>
<div className="flex items-start">
<Button
className="rounded-r-none border-r-0"
aria-label="Regenerate tracked object description"
onClick={() => regenerateDescription("thumbnails")}
>
Regenerate
</Button>
{search.has_snapshot && (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
className="rounded-l-none border-l-0 px-2"
aria-label="Expand regeneration menu"
>
<FaChevronDown className="size-3" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
<DropdownMenuItem
className="cursor-pointer"
aria-label="Regenerate from snapshot"
onClick={() => regenerateDescription("snapshot")}
>
Regenerate from Snapshot
</DropdownMenuItem>
<DropdownMenuItem
className="cursor-pointer"
aria-label="Regenerate from thumbnails"
onClick={() => regenerateDescription("thumbnails")}
>
Regenerate from Thumbnails
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
)}
</div>
<div className="flex items-start">
<Button
className="rounded-r-none border-r-0"
aria-label="Regenerate tracked object description"
onClick={() => regenerateDescription("thumbnails")}
>
Regenerate
</Button>
{search.has_snapshot && (
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
className="rounded-l-none border-l-0 px-2"
aria-label="Expand regeneration menu"
>
<FaChevronDown className="size-3" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent>
<DropdownMenuItem
className="cursor-pointer"
aria-label="Regenerate from snapshot"
onClick={() => regenerateDescription("snapshot")}
>
Regenerate from Snapshot
</DropdownMenuItem>
<DropdownMenuItem
className="cursor-pointer"
aria-label="Regenerate from thumbnails"
onClick={() => regenerateDescription("thumbnails")}
>
Regenerate from Thumbnails
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
)}
</div>
)}
{(config?.cameras[search.camera].genai.enabled && search.end_time) ||
(!config?.cameras[search.camera].genai.enabled && (
<Button
variant="select"
aria-label="Save"
@@ -551,8 +552,7 @@ function ObjectDetailsTab({
>
Save
</Button>
</>
)}
))}
</div>
</div>
</div>

View File

@@ -46,7 +46,7 @@ export default function SearchSettings({
const trigger = (
<Button
className="flex items-center gap-2"
aria-label="Search Settings"
aria-label="Explore Settings"
size="sm"
>
<FaCog className="text-secondary-foreground" />

View File

@@ -328,12 +328,12 @@ export default function Explore() {
<div className="flex max-w-96 flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
<div className="my-5 flex flex-col items-center gap-2 text-xl">
<TbExclamationCircle className="mb-3 size-10" />
-<div>Search Unavailable</div>
+<div>Explore is Unavailable</div>
</div>
{embeddingsReindexing && allModelsLoaded && (
<>
<div className="text-center text-primary-variant">
-Search can be used after tracked object embeddings have
+Explore can be used after tracked object embeddings have
finished reindexing.
</div>
<div className="pt-5 text-center">
@@ -384,8 +384,8 @@ export default function Explore() {
<>
<div className="text-center text-primary-variant">
Frigate is downloading the necessary embeddings models to
-support semantic searching. This may take several minutes
-depending on the speed of your network connection.
+support the Semantic Search feature. This may take several
+minutes depending on the speed of your network connection.
</div>
<div className="flex w-96 flex-col gap-2 py-5">
<div className="flex flex-row items-center justify-center gap-2">

View File

@@ -40,7 +40,7 @@ import UiSettingsView from "@/views/settings/UiSettingsView";
const allSettingsViews = [
"UI settings",
"search settings",
"explore settings",
"camera settings",
"masks / zones",
"motion tuner",
@@ -175,7 +175,7 @@ export default function Settings() {
</div>
<div className="mt-2 flex h-full w-full flex-col items-start md:h-dvh md:pb-24">
{page == "UI settings" && <UiSettingsView />}
{page == "search settings" && (
{page == "explore settings" && (
<SearchSettingsView setUnsavedChanges={setUnsavedChanges} />
)}
{page == "debug" && (

View File

@@ -91,7 +91,7 @@ export default function SearchSettingsView({
)
.then((res) => {
if (res.status === 200) {
toast.success("Search settings have been saved.", {
toast.success("Explore settings have been saved.", {
position: "top-center",
});
setChangedValue(false);
@@ -128,7 +128,7 @@ export default function SearchSettingsView({
if (changedValue) {
addMessage(
"search_settings",
-`Unsaved search settings changes`,
+`Unsaved Explore settings changes`,
undefined,
"search_settings",
);
@@ -140,7 +140,7 @@ export default function SearchSettingsView({
}, [changedValue]);
useEffect(() => {
document.title = "Search Settings - Frigate";
document.title = "Explore Settings - Frigate";
}, []);
if (!config) {
@@ -152,7 +152,7 @@ export default function SearchSettingsView({
<Toaster position="top-center" closeButton={true} />
<div className="scrollbar-container order-last mb-10 mt-2 flex h-full w-full flex-col overflow-y-auto rounded-lg border-[1px] border-secondary-foreground bg-background_alt p-2 md:order-none md:mb-0 md:mr-2 md:mt-0">
<Heading as="h3" className="my-2">
-Search Settings
+Explore Settings
</Heading>
<Separator className="my-2 flex bg-secondary" />
<Heading as="h4" className="my-2">
@@ -221,7 +221,7 @@ export default function SearchSettingsView({
<div className="text-md">Model Size</div>
<div className="space-y-1 text-sm text-muted-foreground">
<p>
-The size of the model used for semantic search embeddings.
+The size of the model used for Semantic Search embeddings.
</p>
<ul className="list-disc pl-5 text-sm">
<li>