Convert detectors to a factory pattern; allow a different model to be set for each detector (#4635)
* refactor detectors
* move create_detector and DetectorTypeEnum
* fix code formatting
* add detector model config models
* fix detector unit tests
* adjust SharedMemory size to largest detector model shape
* fix detector model config defaults
* enable auto-discovery of detectors
* simplify config
* simplify config changes further
* update detectors docs; detect detector configs dynamically
* add suggested changes
* remove custom detector doc
* fix grammar, adjust device defaults
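The factory pattern and auto-discovery described above can be illustrated with the short Python sketch below. It is a simplified approximation, not the code from this commit: the discover_plugins() helper and its registry dict are assumptions for the example, and Frigate's actual create_detector may be structured differently. The idea shown is that every plugin module in frigate.detectors.plugins subclasses DetectionApi and declares a type_key, so scanning the package yields a per-type factory.

# Illustrative sketch only; not the code added by this commit.
import importlib
import pkgutil

from frigate.detectors.detection_api import DetectionApi


def discover_plugins(package_name="frigate.detectors.plugins"):
    # Import every module in the plugins package so each DetectionApi subclass is defined.
    package = importlib.import_module(package_name)
    for _, module_name, _ in pkgutil.iter_modules(package.__path__):
        importlib.import_module(f"{package_name}.{module_name}")
    # Map each plugin's type key (e.g. "cpu") to its implementation class.
    return {cls.type_key: cls for cls in DetectionApi.__subclasses__()}


def create_detector(detector_config):
    # Look up the implementation by the config's discriminating `type` field and instantiate it.
    registry = discover_plugins()
    detector_class = registry.get(detector_config.type)
    if detector_class is None:
        raise ValueError(f"Unknown detector type: {detector_config.type}")
    return detector_class(detector_config)

Because each plugin's config model declares type: Literal[DETECTOR_KEY], a heterogeneous detectors section can be validated by Pydantic and each entry routed to its own plugin, including its own model settings.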
frigate/detectors/plugins/cpu_tfl.py (new file, 60 additions)
@@ -0,0 +1,60 @@
import logging
import numpy as np

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from typing import Literal
from pydantic import Extra, Field
import tflite_runtime.interpreter as tflite


logger = logging.getLogger(__name__)

DETECTOR_KEY = "cpu"


class CpuDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]
    num_threads: int = Field(default=3, title="Number of detection threads")


class CpuTfl(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: CpuDetectorConfig):
        self.interpreter = tflite.Interpreter(
            model_path=detector_config.model.path or "/cpu_model.tflite",
            num_threads=detector_config.num_threads or 3,
        )

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )

        detections = np.zeros((20, 6), np.float32)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
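For context, a rough usage sketch of the new plugin follows. It assumes an SSD-style quantized .tflite model actually exists at the configured path and that CpuDetectorConfig accepts the type, num_threads, and model fields used in the diff; the model path and dummy frame are illustrative, not part of the commit.

# Rough usage sketch; the model path and dummy input are assumptions, not part of the commit.
import numpy as np

from frigate.detectors.plugins.cpu_tfl import CpuDetectorConfig, CpuTfl

config = CpuDetectorConfig(type="cpu", num_threads=2, model={"path": "/cpu_model.tflite"})
detector = CpuTfl(config)

# Read the expected input shape from the interpreter instead of hard-coding it.
# uint8 assumes a quantized model, which is the common case for TFLite CPU detectors.
_, height, width, channels = detector.tensor_input_details[0]["shape"]
frame = np.zeros((1, height, width, channels), dtype=np.uint8)

# Each returned row is [class_id, score, y_min, x_min, y_max, x_max]; at most 20 rows
# are filled, and detections below the 0.4 score threshold stop the loop.
detections = detector.detect_raw(frame)
print(detections[:3])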