Merge remote-tracking branch 'origin/master' into dev

Blake Blackshear
2024-02-14 18:20:55 -06:00
25 changed files with 408 additions and 91 deletions

View File

@@ -38,6 +38,7 @@ class WebSocketClient(Communicator): # type: ignore[misc]
     def __init__(self, config: FrigateConfig) -> None:
         self.config = config
+        self.websocket_server = None

     def subscribe(self, receiver: Callable) -> None:
         self._dispatcher = receiver
@@ -98,6 +99,10 @@ class WebSocketClient(Communicator): # type: ignore[misc]
logger.debug(f"payload for {topic} wasn't text. Skipping...")
return
if self.websocket_server is None:
logger.debug("Skipping message, websocket not connected yet")
return
try:
self.websocket_server.manager.broadcast(ws_message)
except ConnectionResetError:
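
The new guard prevents publishing before the websocket server thread has been created. A minimal sketch of the same None-guard pattern, using a hypothetical Broadcaster stand-in rather than Frigate's actual class:

```python
# Illustrative sketch of the None-guard above; Broadcaster is a stand-in,
# only the guard logic mirrors the change.
import logging
from typing import Any, Optional

logger = logging.getLogger(__name__)


class Broadcaster:
    def __init__(self) -> None:
        # stays None until the server is actually created in a later start step
        self.websocket_server: Optional[Any] = None

    def publish(self, topic: str, ws_message: str) -> None:
        # messages published during startup are dropped instead of raising
        # AttributeError on the missing server
        if self.websocket_server is None:
            logger.debug("Skipping message, websocket not connected yet")
            return
        self.websocket_server.manager.broadcast(ws_message)
```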

View File

@@ -544,6 +544,13 @@ class BirdseyeModeEnum(str, Enum):
         return list(cls)[index]


+class BirdseyeLayoutConfig(FrigateBaseModel):
+    scaling_factor: float = Field(
+        default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
+    )
+    max_cameras: Optional[int] = Field(default=None, title="Max cameras")
+
+
 class BirdseyeConfig(FrigateBaseModel):
     enabled: bool = Field(default=True, title="Enable birdseye view.")
     restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
@@ -555,9 +562,15 @@ class BirdseyeConfig(FrigateBaseModel):
         ge=1,
         le=31,
     )
+    inactivity_threshold: int = Field(
+        default=30, title="Birdseye Inactivity Threshold", gt=0
+    )
     mode: BirdseyeModeEnum = Field(
         default=BirdseyeModeEnum.objects, title="Tracking mode."
     )
+    layout: BirdseyeLayoutConfig = Field(
+        default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
+    )


 # uses BaseModel because some global attributes are not available at the camera level
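
The new layout options are enforced by pydantic's Field constraints, so an out-of-range scaling_factor is rejected when the config is loaded. A small sketch of that behavior, assuming a plain pydantic BaseModel in place of FrigateBaseModel:

```python
# Sketch of the Field bounds on the new layout options, using plain pydantic
# BaseModel in place of FrigateBaseModel (an assumption for self-containment).
from typing import Optional

from pydantic import BaseModel, Field, ValidationError


class BirdseyeLayoutConfig(BaseModel):
    scaling_factor: float = Field(default=2.0, ge=1.0, le=5.0)
    max_cameras: Optional[int] = Field(default=None)


print(BirdseyeLayoutConfig().scaling_factor)  # 2.0 (the default)
print(BirdseyeLayoutConfig(scaling_factor=3.5, max_cameras=4))

try:
    BirdseyeLayoutConfig(scaling_factor=6.0)  # above le=5.0
except ValidationError as e:
    print(e)  # rejected by the ge/le constraints
```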

View File

@@ -26,6 +26,10 @@ LABEL_CONSOLIDATION_MAP = {
"face": 0.5,
}
LABEL_CONSOLIDATION_DEFAULT = 0.9
LABEL_NMS_MAP = {
"car": 0.6,
}
LABEL_NMS_DEFAULT = 0.4
# Audio Consts
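
The map gives specific labels their own NMS IoU threshold, with everything else falling back to the default; a higher threshold means less suppression, which suits labels like car where several genuinely overlapping boxes (a row of parked cars, vertically stacked detections) should all survive. The lookup is a plain dict.get:

```python
LABEL_NMS_MAP = {
    "car": 0.6,
}
LABEL_NMS_DEFAULT = 0.4

# per-label threshold with a shared fallback, as wired into reduce_detections
print(LABEL_NMS_MAP.get("car", LABEL_NMS_DEFAULT))     # 0.6
print(LABEL_NMS_MAP.get("person", LABEL_NMS_DEFAULT))  # 0.4
```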

View File

@@ -277,6 +277,13 @@ def send_to_plus(id):
                 box,
                 event.label,
             )
+        except ValueError:
+            message = "Error uploading annotation, unsupported label provided."
+            logger.error(message)
+            return make_response(
+                jsonify({"success": False, "message": message}),
+                400,
+            )
         except Exception as ex:
             logger.exception(ex)
             return make_response(
@@ -348,6 +355,13 @@ def false_positive(id):
                 event.model_type,
                 event.detector_type,
             )
+        except ValueError:
+            message = "Error uploading false positive, unsupported label provided."
+            logger.error(message)
+            return make_response(
+                jsonify({"success": False, "message": message}),
+                400,
+            )
         except Exception as ex:
             logger.exception(ex)
             return make_response(
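
Both endpoints now map the ValueError raised by PlusApi to a 400 instead of letting it fall through to the generic 500 handler. A minimal Flask sketch of that pattern; upload_annotation is a hypothetical stand-in for the PlusApi call:

```python
# Minimal Flask sketch: a known bad-input error becomes a 400 while
# unexpected errors still become a 500; upload_annotation is hypothetical.
import logging

from flask import Flask, jsonify, make_response

logger = logging.getLogger(__name__)
app = Flask(__name__)


def upload_annotation(label: str) -> None:
    raise ValueError(f"Unsupported label value provided: {label}")


@app.route("/events/<id>/plus", methods=["POST"])
def send_to_plus(id):
    try:
        upload_annotation("not_a_label")
    except ValueError:
        message = "Error uploading annotation, unsupported label provided."
        logger.error(message)
        return make_response(jsonify({"success": False, "message": message}), 400)
    except Exception as ex:
        logger.exception(ex)
        return make_response(jsonify({"success": False, "message": "Error"}), 500)
    return make_response(jsonify({"success": True}), 200)
```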

View File

@@ -33,11 +33,13 @@ def get_standard_aspect_ratio(width: int, height: int) -> tuple[int, int]:
         (16, 9),
         (9, 16),
         (20, 10),
+        (16, 3),  # max wide camera
         (16, 6),  # reolink duo 2
         (32, 9),  # panoramic cameras
         (12, 9),
         (9, 12),
+        (22, 15),  # Amcrest, NTSC DVT
         (1, 1),  # fisheye
     ]  # aspects are scaled to have common relative size
     known_aspects_ratios = list(
         map(lambda aspect: aspect[0] / aspect[1], known_aspects)
@@ -66,7 +68,13 @@ def get_canvas_shape(width: int, height: int) -> tuple[int, int]:
 class Canvas:
-    def __init__(self, canvas_width: int, canvas_height: int) -> None:
+    def __init__(
+        self,
+        canvas_width: int,
+        canvas_height: int,
+        scaling_factor: int,
+    ) -> None:
+        self.scaling_factor = scaling_factor
         gcd = math.gcd(canvas_width, canvas_height)
         self.aspect = get_standard_aspect_ratio(
             (canvas_width / gcd), (canvas_height / gcd)
@@ -80,7 +88,7 @@ class Canvas:
         return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)

     def get_coefficient(self, camera_count: int) -> int:
-        return self.coefficient_cache.get(camera_count, 2)
+        return self.coefficient_cache.get(camera_count, self.scaling_factor)

     def set_coefficient(self, camera_count: int, coefficient: int) -> None:
         self.coefficient_cache[camera_count] = coefficient
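
get_coefficient now falls back to the configured scaling factor instead of a hardcoded 2 when a camera count has no cached coefficient. A reduced sketch of just that caching behavior:

```python
# Sketch of the coefficient cache with a configurable fallback, reduced to
# the caching behavior shown in the diff above.
class Canvas:
    def __init__(self, scaling_factor: int) -> None:
        self.scaling_factor = scaling_factor
        self.coefficient_cache: dict[int, int] = {}

    def get_coefficient(self, camera_count: int) -> int:
        # previously the fallback was a hardcoded 2
        return self.coefficient_cache.get(camera_count, self.scaling_factor)

    def set_coefficient(self, camera_count: int, coefficient: int) -> None:
        self.coefficient_cache[camera_count] = coefficient


canvas = Canvas(scaling_factor=3)
print(canvas.get_coefficient(4))  # 3, the configured fallback
canvas.set_coefficient(4, 5)
print(canvas.get_coefficient(4))  # 5, the cached value
```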
@@ -268,9 +276,13 @@ class BirdsEyeFrameManager:
         self.frame_shape = (height, width)
         self.yuv_shape = (height * 3 // 2, width)
         self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
-        self.canvas = Canvas(width, height)
+        self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor)
         self.stop_event = stop_event
         self.camera_metrics = camera_metrics
+        self.inactivity_threshold = config.birdseye.inactivity_threshold
+
+        if config.birdseye.layout.max_cameras:
+            self.last_refresh_time = 0

         # initialize the frame as black and with the Frigate logo
         self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@@ -376,16 +388,39 @@ class BirdsEyeFrameManager:
     def update_frame(self):
         """Update to a new frame for birdseye."""

-        # determine how many cameras are tracking objects within the last 30 seconds
-        active_cameras = set(
+        # determine how many cameras are tracking objects within the last inactivity_threshold seconds
+        active_cameras: set[str] = set(
             [
                 cam
                 for cam, cam_data in self.cameras.items()
                 if cam_data["last_active_frame"] > 0
-                and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
+                and cam_data["current_frame"] - cam_data["last_active_frame"]
+                < self.inactivity_threshold
             ]
         )

+        max_cameras = self.config.birdseye.layout.max_cameras
+        max_camera_refresh = False
+
+        if max_cameras:
+            now = datetime.datetime.now().timestamp()
+
+            if len(active_cameras) == max_cameras and now - self.last_refresh_time < 10:
+                # don't refresh cameras too often
+                active_cameras = self.active_cameras
+            else:
+                limited_active_cameras = sorted(
+                    active_cameras,
+                    key=lambda active_camera: (
+                        self.cameras[active_camera]["current_frame"]
+                        - self.cameras[active_camera]["last_active_frame"]
+                    ),
+                )
+                active_cameras = limited_active_cameras[
+                    : self.config.birdseye.layout.max_cameras
+                ]
+                max_camera_refresh = True
+                self.last_refresh_time = now
+
         # if there are no active cameras
         if len(active_cameras) == 0:
             # if the layout is already cleared
@@ -399,7 +434,18 @@ class BirdsEyeFrameManager:
                 return True

         # check if we need to reset the layout because there is a different number of cameras
-        reset_layout = len(self.active_cameras) - len(active_cameras) != 0
+        if len(self.active_cameras) - len(active_cameras) == 0:
+            if (
+                len(self.active_cameras) == 1
+                and self.active_cameras[0] == active_cameras[0]
+            ):
+                reset_layout = True
+            elif max_camera_refresh:
+                reset_layout = True
+            else:
+                reset_layout = False
+        else:
+            reset_layout = True

         # reset the layout if it needs to be different
         if reset_layout:
@@ -423,17 +469,23 @@ class BirdsEyeFrameManager:
             camera = active_cameras_to_add[0]
             camera_dims = self.cameras[camera]["dimensions"].copy()
             scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
-            coefficient = (
-                1
-                if scaled_width <= self.canvas.width
-                else self.canvas.width / scaled_width
-            )
+
+            # center camera view in canvas and ensure that it fits
+            if scaled_width < self.canvas.width:
+                coefficient = 1
+                x_offset = int((self.canvas.width - scaled_width) / 2)
+            else:
+                coefficient = self.canvas.width / scaled_width
+                x_offset = int(
+                    (self.canvas.width - (scaled_width * coefficient)) / 2
+                )
+
             self.camera_layout = [
                 [
                     (
                         camera,
                         (
-                            0,
+                            x_offset,
                             0,
                             int(scaled_width * coefficient),
                             int(self.canvas.height * coefficient),
@@ -477,7 +529,11 @@ class BirdsEyeFrameManager:
         return True

-    def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
+    def calculate_layout(
+        self,
+        cameras_to_add: list[str],
+        coefficient: float,
+    ) -> tuple[any]:
         """Calculate the optimal layout for 2+ cameras."""

         def map_layout(camera_layout: list[list[any]], row_height: int):
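
With max_cameras set, update_frame ranks active cameras by how recently they saw activity (smallest current-to-last-active frame gap first), keeps the first N, and refreshes that selection at most every 10 seconds. A reduced sketch of the sort-and-slice step with made-up frame counters:

```python
# Reduced sketch of the max_cameras selection above; the camera data is
# made up, and only the sort-and-slice step is shown.
cameras = {
    "front": {"current_frame": 1000, "last_active_frame": 998},  # 2 frames ago
    "back": {"current_frame": 1000, "last_active_frame": 950},   # 50 frames ago
    "side": {"current_frame": 1000, "last_active_frame": 999},   # 1 frame ago
}
max_cameras = 2

# most recently active cameras first (smallest frame gap)
limited_active_cameras = sorted(
    cameras.keys(),
    key=lambda cam: cameras[cam]["current_frame"] - cameras[cam]["last_active_frame"],
)
active_cameras = limited_active_cameras[:max_cameras]
print(active_cameras)  # ['side', 'front']
```

The 10-second refresh window keeps the birdseye layout from churning when more cameras are active than the layout can show.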

View File

@@ -171,6 +171,17 @@ class PlusApi:
         )

         if not r.ok:
+            try:
+                error_response = r.json()
+                errors = error_response.get("errors", [])
+                for error in errors:
+                    if (
+                        error.get("param") == "label"
+                        and error.get("type") == "invalid_enum_value"
+                    ):
+                        raise ValueError(f"Unsupported label value provided: {label}")
+            except ValueError as e:
+                raise e
             raise Exception(r.text)

     def add_annotation(
@@ -193,6 +204,17 @@ class PlusApi:
         )

         if not r.ok:
+            try:
+                error_response = r.json()
+                errors = error_response.get("errors", [])
+                for error in errors:
+                    if (
+                        error.get("param") == "label"
+                        and error.get("type") == "invalid_enum_value"
+                    ):
+                        raise ValueError(f"Unsupported label value provided: {label}")
+            except ValueError as e:
+                raise e
             raise Exception(r.text)

     def get_model_download_url(
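
Both upload paths now inspect the error body for an invalid label before raising the generic exception. A sketch of that parsing against a hand-built payload; the payload shape mirrors only the fields the diff checks, not a documented Frigate+ schema:

```python
# Sketch of the error-body inspection above, run against a hand-built
# payload in place of a real API response.
label = "not_a_real_label"
error_response = {
    "errors": [{"param": "label", "type": "invalid_enum_value"}],
}

try:
    for error in error_response.get("errors", []):
        if (
            error.get("param") == "label"
            and error.get("type") == "invalid_enum_value"
        ):
            raise ValueError(f"Unsupported label value provided: {label}")
    raise Exception("fallback: raw response text")  # non-label errors
except ValueError as e:
    print(e)  # the HTTP layer maps this to a 400 response
```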

View File

@@ -6,6 +6,7 @@ from enum import Enum
 import numpy
 from onvif import ONVIFCamera, ONVIFError
+from zeep.exceptions import Fault, TransportError

 from frigate.config import FrigateConfig, ZoomingModeEnum
 from frigate.types import PTZMetricsTypes
@@ -66,19 +67,56 @@ class OnvifController:
         # create init services
         media = onvif.create_media_service()
         logger.debug(f"Onvif media xaddr for {camera_name}: {media.xaddr}")

         try:
-            profile = media.GetProfiles()[0]
-        except ONVIFError as e:
-            logger.error(f"Unable to connect to camera: {camera_name}: {e}")
+            # this will fire an exception if camera is not a ptz
+            capabilities = onvif.get_definition("ptz")
+            logger.debug(f"Onvif capabilities for {camera_name}: {capabilities}")
+        except (ONVIFError, Fault, TransportError) as e:
+            logger.error(
+                f"Unable to get Onvif capabilities for camera: {camera_name}: {e}"
+            )
             return False

+        try:
+            profiles = media.GetProfiles()
+        except (ONVIFError, Fault, TransportError) as e:
+            logger.error(
+                f"Unable to get Onvif media profiles for camera: {camera_name}: {e}"
+            )
+            return False
+
+        profile = None
+        for key, onvif_profile in enumerate(profiles):
+            if (
+                onvif_profile.VideoEncoderConfiguration
+                and onvif_profile.VideoEncoderConfiguration.Encoding == "H264"
+            ):
+                profile = onvif_profile
+                logger.debug(f"Selected Onvif profile for {camera_name}: {profile}")
+                break
+
+        if profile is None:
+            logger.error(
+                f"No appropriate Onvif profiles found for camera: {camera_name}."
+            )
+            return False
+
+        # get the PTZ config for the profile
+        try:
+            configs = profile.PTZConfiguration
+            logger.debug(
+                f"Onvif ptz config for media profile in {camera_name}: {configs}"
+            )
+        except Exception as e:
+            logger.error(
+                f"Invalid Onvif PTZ configuration for camera: {camera_name}: {e}"
+            )
+            return False
+
         ptz = onvif.create_ptz_service()
-        request = ptz.create_type("GetConfigurations")
-        configs = ptz.GetConfigurations(request)[0]
-        logger.debug(f"Onvif configs for {camera_name}: {configs}")
+
         request = ptz.create_type("GetConfigurationOptions")
         request.ConfigurationToken = profile.PTZConfiguration.token
         ptz_config = ptz.GetConfigurationOptions(request)
@@ -187,19 +225,18 @@ class OnvifController:
             ] = preset["token"]

         # get list of supported features
-        ptz_config = ptz.GetConfigurationOptions(request)
         supported_features = []

-        if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace:
+        if configs.DefaultContinuousPanTiltVelocitySpace:
             supported_features.append("pt")

-        if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:
+        if configs.DefaultContinuousZoomVelocitySpace:
             supported_features.append("zoom")

-        if ptz_config.Spaces and ptz_config.Spaces.RelativePanTiltTranslationSpace:
+        if configs.DefaultRelativePanTiltTranslationSpace:
             supported_features.append("pt-r")

-        if ptz_config.Spaces and ptz_config.Spaces.RelativeZoomTranslationSpace:
+        if configs.DefaultRelativeZoomTranslationSpace:
             supported_features.append("zoom-r")

         try:
             # get camera's zoom limits from onvif config
@@ -218,7 +255,7 @@ class OnvifController:
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported"
)
if ptz_config.Spaces and ptz_config.Spaces.AbsoluteZoomPositionSpace:
if configs.DefaultAbsoluteZoomPositionSpace:
supported_features.append("zoom-a")
try:
# get camera's zoom limits from onvif config
@@ -236,7 +273,10 @@ class OnvifController:
             )

         # set relative pan/tilt space for autotracker
-        if fov_space_id is not None:
+        if (
+            fov_space_id is not None
+            and configs.DefaultRelativePanTiltTranslationSpace is not None
+        ):
             supported_features.append("pt-r-fov")

         self.cams[camera_name][
             "relative_fov_range"

View File

@@ -311,6 +311,15 @@ class TestObjectBoundingBoxes(unittest.TestCase):
         consolidated_detections = reduce_detections(frame_shape, detections)
         assert len(consolidated_detections) == len(detections)

+    def test_vert_stacked_cars_not_reduced(self):
+        detections = [
+            ("car", 0.8, (954, 312, 1247, 475), 498512, 1.48, (800, 200, 1400, 600)),
+            ("car", 0.85, (970, 380, 1273, 610), 698752, 1.56, (800, 200, 1400, 700)),
+        ]
+        frame_shape = (720, 1280)
+
+        consolidated_detections = reduce_detections(frame_shape, detections)
+        assert len(consolidated_detections) == len(detections)

 class TestRegionGrid(unittest.TestCase):
     def setUp(self) -> None:

View File

@@ -10,7 +10,12 @@ import numpy as np
 from peewee import DoesNotExist

 from frigate.config import DetectConfig, ModelConfig
-from frigate.const import LABEL_CONSOLIDATION_DEFAULT, LABEL_CONSOLIDATION_MAP
+from frigate.const import (
+    LABEL_CONSOLIDATION_DEFAULT,
+    LABEL_CONSOLIDATION_MAP,
+    LABEL_NMS_DEFAULT,
+    LABEL_NMS_MAP,
+)
 from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.models import Event, Regions, Timeline
 from frigate.util.image import (
@@ -466,6 +471,7 @@ def reduce_detections(
     selected_objects = []
     for group in detected_object_groups.values():
+        label = group[0][0]

         # o[2] is the box of the object: xmin, ymin, xmax, ymax
         # apply max/min to ensure values do not exceed the known frame size
         boxes = [
@@ -483,7 +489,9 @@ def reduce_detections(
         # due to min score requirement of NMSBoxes
         confidences = [0.6 if clipped(o, frame_shape) else o[1] for o in group]

-        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
+        idxs = cv2.dnn.NMSBoxes(
+            boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)
+        )

         # add objects
         for index in idxs:
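
The group's label now picks the NMS IoU threshold. A standalone sketch of cv2.dnn.NMSBoxes with the per-label lookup; the boxes (x, y, w, h) and scores are made up for illustration:

```python
# Standalone sketch of per-label NMS thresholds with OpenCV; box and score
# values are made up for illustration.
import cv2

LABEL_NMS_MAP = {"car": 0.6}
LABEL_NMS_DEFAULT = 0.4

label = "car"
# NMSBoxes expects (x, y, w, h) boxes and suppresses a box when its IoU with
# a higher-scoring kept box exceeds the threshold, so the looser 0.6 value
# for "car" keeps more overlapping car boxes than the 0.4 default would
boxes = [(100, 100, 200, 120), (110, 150, 210, 130)]
confidences = [0.85, 0.8]

idxs = cv2.dnn.NMSBoxes(
    boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)
)
print(idxs)  # indexes of the boxes that survive suppression
```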