Compare commits
v0.2.0 ... v0.8.0-rc2 (451 commits)

The compare view lists each commit in a table of author, SHA1, and date; the 451 rows (abbreviated SHAs fcc9cd56cc through 2ec45cd1b6) are omitted here.
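The same commit listing can be regenerated from a local clone; a minimal sketch, assuming the two tags above exist in the clone and that the repository is blakeblackshear/frigate (the URL is an assumption inferred from the project name, not stated on this page):

```sh
# Clone the repository and list the commits covered by the compare view,
# newest first, one abbreviated SHA plus subject line per commit.
git clone https://github.com/blakeblackshear/frigate.git
cd frigate
git log --oneline v0.2.0..v0.8.0-rc2
```

The file-by-file changes for the same range follow.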
@@ -1,6 +1,7 @@
 README.md
-diagram.png
+docs/
 .gitignore
 debug
 config/
 *.pyc
+.git
.github/FUNDING.yml (vendored; 2 changed lines)
@@ -1 +1 @@
-ko_fi: blakeblackshear
+github: blakeblackshear
.github/ISSUE_TEMPLATE/bug_report.md (vendored; new file, 56 added lines)
@@ -0,0 +1,56 @@
+---
+name: Bug report or Support request
+about: ''
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what your issue is.
+
+**Version of frigate**
+Output from `/version`
+
+**Config file**
+Include your full config file wrapped in triple back ticks.
+```yaml
+config here
+```
+
+**Frigate container logs**
+```
+Include relevant log output here
+```
+
+**Frigate stats**
+```json
+Output from frigate's /stats endpoint
+```
+
+**FFprobe from your camera**
+
+Run the following command and paste output below
+```
+ffprobe <stream_url>
+```
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Computer Hardware**
+- OS: [e.g. Ubuntu, Windows]
+- Install method: [e.g. Addon, Docker Compose, Docker Command]
+- Virtualization: [e.g. Proxmox, Virtualbox]
+- Coral Version: [e.g. USB, PCIe, None]
+- Network Setup: [e.g. Wired, WiFi]
+
+**Camera Info:**
+- Manufacturer: [e.g. Dahua]
+- Model: [e.g. IPC-HDW5231R-ZE]
+- Resolution: [e.g. 720p]
+- FPS: [e.g. 5]
+
+**Additional context**
+Add any other context about the problem here.
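For the `ffprobe <stream_url>` step in the template above, a minimal sketch of a filled-in invocation; the RTSP address, credentials, and path are made-up placeholders for illustration, not values taken from this repository:

```sh
# Print the container, video, and audio stream details for a camera feed;
# replace the placeholder URL with the camera's real RTSP endpoint.
ffprobe "rtsp://user:password@192.168.1.10:554/cam/realmonitor?channel=1&subtype=0"
```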
.gitignore (vendored; 11 changed lines)
@@ -1,2 +1,11 @@
-*.pyc
+.DS_Store
+*.pyc
 debug
+.vscode
+config/config.yml
+models
+*.mp4
+*.db
+frigate/version.py
+web/build
+web/node_modules
Dockerfile (deleted; 109 removed lines)
@@ -1,109 +0,0 @@
-FROM ubuntu:18.04
-
-ARG DEVICE
-
-# Install packages for apt repo
-RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
-apt-transport-https \
-ca-certificates \
-curl \
-wget \
-gnupg-agent \
-dirmngr \
-software-properties-common \
-&& rm -rf /var/lib/apt/lists/*
-
-COPY scripts/install_odroid_repo.sh .
-
-RUN if [ "$DEVICE" = "odroid" ]; then \
-sh /install_odroid_repo.sh; \
-fi
-
-RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
-python3 \
-# OpenCV dependencies
-ffmpeg \
-build-essential \
-cmake \
-unzip \
-pkg-config \
-libjpeg-dev \
-libpng-dev \
-libtiff-dev \
-libavcodec-dev \
-libavformat-dev \
-libswscale-dev \
-libv4l-dev \
-libxvidcore-dev \
-libx264-dev \
-libgtk-3-dev \
-libatlas-base-dev \
-gfortran \
-python3-dev \
-# Coral USB Python API Dependencies
-libusb-1.0-0 \
-python3-pip \
-python3-pil \
-python3-numpy \
-libc++1 \
-libc++abi1 \
-libunwind8 \
-libgcc1 \
-# VAAPI drivers for Intel hardware accel
-libva-drm2 libva2 i965-va-driver vainfo \
-&& rm -rf /var/lib/apt/lists/*
-
-# Install core packages
-RUN wget -q -O /tmp/get-pip.py --no-check-certificate https://bootstrap.pypa.io/get-pip.py && python3 /tmp/get-pip.py
-RUN pip install -U pip \
-numpy \
-Flask \
-paho-mqtt \
-PyYAML
-
-# Download & build OpenCV
-# TODO: use multistage build to reduce image size:
-# https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
-# https://www.merixstudio.com/blog/docker-multi-stage-builds-python-development/
-RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
-RUN cd /usr/local/src/ \
-&& unzip 4.0.1.zip \
-&& rm 4.0.1.zip \
-&& cd /usr/local/src/opencv-4.0.1/ \
-&& mkdir build \
-&& cd /usr/local/src/opencv-4.0.1/build \
-&& cmake -D CMAKE_INSTALL_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local/ .. \
-&& make -j4 \
-&& make install \
-&& ldconfig \
-&& rm -rf /usr/local/src/opencv-4.0.1
-
-# Download and install EdgeTPU libraries for Coral
-RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names \
-&& tar xzf edgetpu_api.tar.gz
-
-COPY scripts/install_edgetpu_api.sh edgetpu_api/install.sh
-
-RUN cd edgetpu_api \
-&& /bin/bash install.sh
-
-# Copy a python 3.6 version
-RUN cd /usr/local/lib/python3.6/dist-packages/edgetpu/swig/ \
-&& ln -s _edgetpu_cpp_wrapper.cpython-35m-arm-linux-gnueabihf.so _edgetpu_cpp_wrapper.cpython-36m-arm-linux-gnueabihf.so
-
-# symlink the model and labels
-RUN wget https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -O mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite --trust-server-names
-RUN wget https://dl.google.com/coral/canned_models/coco_labels.txt -O coco_labels.txt --trust-server-names
-RUN ln -s mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite /frozen_inference_graph.pb
-RUN ln -s /coco_labels.txt /label_map.pbtext
-
-# Minimize image size
-RUN (apt-get autoremove -y; \
-apt-get autoclean -y)
-
-WORKDIR /opt/frigate/
-ADD frigate frigate/
-COPY detect_objects.py .
-COPY benchmark.py .
-
-CMD ["python3", "-u", "detect_objects.py"]
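The deleted Dockerfile above built a single-stage Ubuntu 18.04 image driven by a DEVICE build argument. A minimal sketch of how such an image could have been built and started; the image tag and config mount path are illustrative assumptions, not commands from the repository:

```sh
# Build the legacy single-stage image (DEVICE=odroid switches the apt repo),
# then run it with a config directory mounted under the image's WORKDIR.
docker build --build-arg DEVICE=odroid -t frigate-legacy .
docker run --rm -v "$(pwd)/config:/opt/frigate/config" frigate-legacy
# A real deployment would also pass the Coral USB device into the container.
```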
LICENSE (682 changed lines: 661 removed, 21 added); the GNU Affero General Public License v3 text is replaced by the MIT License.
@@ -1,661 +1,21 @@
[removed: the full 661-line text of the GNU Affero General Public License, version 3, 19 November 2007, through its "How to Apply These Terms to Your New Programs" section]
+The MIT License
+
+Copyright (c) 2020 Blake Blackshear
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
59
Makefile
Normal file
@@ -0,0 +1,59 @@
default_target: amd64_frigate

COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)

version:
	echo "VERSION='0.8.0-$(COMMIT_HASH)'" > frigate/version.py

web:
	docker build --tag frigate-web --file docker/Dockerfile.web web/

amd64_wheels:
	docker build --tag blakeblackshear/frigate-wheels:1.0.1-amd64 --file docker/Dockerfile.wheels .

amd64_ffmpeg:
	docker build --tag blakeblackshear/frigate-ffmpeg:1.1.0-amd64 --file docker/Dockerfile.ffmpeg.amd64 .

amd64_frigate: version web
	docker build --tag frigate-base --build-arg ARCH=amd64 --build-arg FFMPEG_VERSION=1.1.0 --build-arg WHEELS_VERSION=1.0.1 --file docker/Dockerfile.base .
	docker build --tag frigate --file docker/Dockerfile.amd64 .

amd64_all: amd64_wheels amd64_ffmpeg amd64_frigate

amd64nvidia_wheels:
	docker build --tag blakeblackshear/frigate-wheels:1.0.1-amd64nvidia --file docker/Dockerfile.wheels .

amd64nvidia_ffmpeg:
	docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-amd64nvidia --file docker/Dockerfile.ffmpeg.amd64nvidia .

amd64nvidia_frigate: version web
	docker build --tag frigate-base --build-arg ARCH=amd64nvidia --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.1 --file docker/Dockerfile.base .
	docker build --tag frigate --file docker/Dockerfile.amd64nvidia .

amd64nvidia_all: amd64nvidia_wheels amd64nvidia_ffmpeg amd64nvidia_frigate

aarch64_wheels:
	docker build --tag blakeblackshear/frigate-wheels:1.0.1-aarch64 --file docker/Dockerfile.wheels .

aarch64_ffmpeg:
	docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 .

aarch64_frigate: version web
	docker build --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.1 --file docker/Dockerfile.base .
	docker build --tag frigate --file docker/Dockerfile.aarch64 .

aarch64_all: aarch64_wheels aarch64_ffmpeg aarch64_frigate

armv7_wheels:
	docker build --tag blakeblackshear/frigate-wheels:1.0.1-armv7 --file docker/Dockerfile.wheels .

armv7_ffmpeg:
	docker build --tag blakeblackshear/frigate-ffmpeg:1.0.0-armv7 --file docker/Dockerfile.ffmpeg.armv7 .

armv7_frigate: version web
	docker build --tag frigate-base --build-arg ARCH=armv7 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.1 --file docker/Dockerfile.base .
	docker build --tag frigate --file docker/Dockerfile.armv7 .

armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate

.PHONY: web
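The `version` target above bakes the short git commit hash into `frigate/version.py` before any image is built. As a rough illustration only (this helper script is not part of the diff), the same stamp could be produced from Python; the base version string and output path simply mirror the Makefile, everything else is an assumption:

# Illustrative sketch: regenerate the VERSION stamp the Makefile writes.
import subprocess

def write_version(base="0.8.0", path="frigate/version.py"):
    # mirrors: git log -1 --pretty=format:"%h"
    commit = subprocess.check_output(
        ["git", "log", "-1", "--pretty=format:%h"], text=True
    ).strip()
    with open(path, "w") as f:
        f.write(f"VERSION='{base}-{commit}'\n")

if __name__ == "__main__":
    write_version()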
99
benchmark.py
Normal file → Executable file
@@ -1,20 +1,93 @@
-import statistics
-import numpy as np
-from edgetpu.detection.engine import DetectionEngine
-
-# Path to frozen detection graph. This is the actual model that is used for the object detection.
-PATH_TO_CKPT = '/frozen_inference_graph.pb'
-
-# Load the edgetpu engine and labels
-engine = DetectionEngine(PATH_TO_CKPT)
-
-frame = np.zeros((300,300,3), np.uint8)
-flattened_frame = np.expand_dims(frame, axis=0).flatten()
-
-detection_times = []
-
-for x in range(0, 1000):
-    objects = engine.DetectWithInputTensor(flattened_frame, threshold=0.1, top_k=3)
-    detection_times.append(engine.get_inference_time())
-
-print("Average inference time: " + str(statistics.mean(detection_times)))
+import os
+from statistics import mean
+import multiprocessing as mp
+import numpy as np
+import datetime
+from frigate.edgetpu import LocalObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
+
+my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
+labels = load_labels('/labelmap.txt')
+
+######
+# Minimal same process runner
+######
+# object_detector = LocalObjectDetector()
+# tensor_input = np.expand_dims(np.full((300,300,3), 0, np.uint8), axis=0)
+
+# start = datetime.datetime.now().timestamp()
+
+# frame_times = []
+# for x in range(0, 1000):
+#     start_frame = datetime.datetime.now().timestamp()
+#     tensor_input[:] = my_frame
+#     detections = object_detector.detect_raw(tensor_input)
+#     parsed_detections = []
+#     for d in detections:
+#         if d[1] < 0.4:
+#             break
+#         parsed_detections.append((
+#             labels[int(d[0])],
+#             float(d[1]),
+#             (d[2], d[3], d[4], d[5])
+#         ))
+#     frame_times.append(datetime.datetime.now().timestamp()-start_frame)
+
+# duration = datetime.datetime.now().timestamp()-start
+# print(f"Processed for {duration:.2f} seconds.")
+# print(f"Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+
+def start(id, num_detections, detection_queue, event):
+    object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue, event)
+    start = datetime.datetime.now().timestamp()
+
+    frame_times = []
+    for x in range(0, num_detections):
+        start_frame = datetime.datetime.now().timestamp()
+        detections = object_detector.detect(my_frame)
+        frame_times.append(datetime.datetime.now().timestamp()-start_frame)
+
+    duration = datetime.datetime.now().timestamp()-start
+    object_detector.cleanup()
+    print(f"{id} - Processed for {duration:.2f} seconds.")
+    print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
+    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+
+######
+# Separate process runner
+######
+# event = mp.Event()
+# detection_queue = mp.Queue()
+# edgetpu_process = EdgeTPUProcess(detection_queue, {'1': event}, 'usb:0')
+
+# start(1, 1000, edgetpu_process.detection_queue, event)
+# print(f"Average raw inference speed: {edgetpu_process.avg_inference_speed.value*1000:.2f}ms")
+
+####
+# Multiple camera processes
+####
+camera_processes = []
+
+events = {}
+for x in range(0, 10):
+    events[str(x)] = mp.Event()
+detection_queue = mp.Queue()
+edgetpu_process_1 = EdgeTPUProcess(detection_queue, events, 'usb:0')
+edgetpu_process_2 = EdgeTPUProcess(detection_queue, events, 'usb:1')
+
+for x in range(0, 10):
+    camera_process = mp.Process(target=start, args=(x, 300, detection_queue, events[str(x)]))
+    camera_process.daemon = True
+    camera_processes.append(camera_process)
+
+start_time = datetime.datetime.now().timestamp()
+
+for p in camera_processes:
+    p.start()
+
+for p in camera_processes:
+    p.join()
+
+duration = datetime.datetime.now().timestamp()-start_time
+print(f"Total - Processed for {duration:.2f} seconds.")
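The rewritten benchmark above fans ten producer processes out over a single detection queue served by two EdgeTPUProcess workers, with one Event per producer used to signal that its result is ready. The sketch below is a generic, self-contained illustration of that queue-plus-event handshake using only the standard library; the worker is a stand-in for EdgeTPUProcess, and every name here is illustrative rather than frigate's API:

# Generic request/response pattern: many clients share one queue, each waits on its own Event.
import multiprocessing as mp

def worker(queue, events, results):
    while True:
        client_id = queue.get()
        if client_id is None:
            break
        results[client_id] = f"detections for {client_id}"  # pretend inference
        events[client_id].set()                              # wake the requester

def client(client_id, queue, event, results):
    queue.put(client_id)   # submit a request
    event.wait()           # block until the worker signals completion
    event.clear()
    print(client_id, results[client_id])

if __name__ == "__main__":
    manager = mp.Manager()
    results = manager.dict()
    queue = mp.Queue()
    events = {str(i): mp.Event() for i in range(3)}
    w = mp.Process(target=worker, args=(queue, events, results), daemon=True)
    w.start()
    clients = [
        mp.Process(target=client, args=(str(i), queue, events[str(i)], results))
        for i in range(3)
    ]
    for c in clients:
        c.start()
    for c in clients:
        c.join()
    queue.put(None)
    w.join()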
Before Width: | Height: | Size: 1.8 MiB |
@@ -1,65 +0,0 @@
web_port: 5000

mqtt:
  host: mqtt.server.com
  topic_prefix: frigate
  # user: username # Optional -- Uncomment for use
  # password: password # Optional -- Uncomment for use

cameras:
  back:
    rtsp:
      user: viewer
      host: 10.0.10.10
      port: 554
      # values that begin with a "$" will be replaced with environment variable
      password: $RTSP_PASSWORD
      path: /cam/realmonitor?channel=1&subtype=2

    ################
    ## Optional mask. Must be the same dimensions as your video feed.
    ## The mask works by looking at the bottom center of the bounding box for the detected
    ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
    ## false positive. In my mask, the grass and driveway visible from my backdoor camera
    ## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
    ## person to stand) are black.
    ################
    # mask: back-mask.bmp

    ################
    # Allows you to limit the framerate within frigate for cameras that do not support
    # custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
    # 3 every 3rd frame, etc.
    ################
    take_frame: 1

    ################
    # Optional hardware acceleration parameters for ffmpeg. If your hardware supports it, it can
    # greatly reduce the CPU power used to decode the video stream. You will need to determine which
    # parameters work for your specific hardware. These may work for those with Intel hardware that
    # supports QuickSync.
    ################
    # ffmpeg_hwaccel_args:
    #   - -hwaccel
    #   - vaapi
    #   - -hwaccel_device
    #   - /dev/dri/renderD128
    #   - -hwaccel_output_format
    #   - yuv420p

    regions:
      - size: 350
        x_offset: 0
        y_offset: 300
        min_person_area: 5000
        threshold: 0.5
      - size: 400
        x_offset: 350
        y_offset: 250
        min_person_area: 2000
        threshold: 0.5
      - size: 400
        x_offset: 750
        y_offset: 250
        min_person_area: 2000
        threshold: 0.5
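The removed example config above describes the mask behaviour in its comments: the bottom center of a detected person's bounding box is looked up in a black-and-white mask image, and a black pixel means the detection is discarded as a false positive. A minimal sketch of that check, assuming a grayscale mask and an (x_min, y_min, x_max, y_max) box format; both the helper name and the box layout are assumptions for illustration, not frigate's exact code:

# Illustrative mask check: bottom center of the box against a black/white mask.
import cv2
import numpy as np

def passes_mask(mask: np.ndarray, box) -> bool:
    x_min, y_min, x_max, y_max = box
    x = int((x_min + x_max) / 2)        # horizontal center of the box
    y = int(y_max)                      # bottom edge of the box
    y = min(y, mask.shape[0] - 1)
    x = min(x, mask.shape[1] - 1)
    return mask[y, x] > 0               # white pixel -> keep, black pixel -> ignore

# Example usage (mask must match the video feed dimensions):
# mask = cv2.imread("back-mask.bmp", cv2.IMREAD_GRAYSCALE)
# keep = passes_mask(mask, (350, 200, 420, 480))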
@@ -1,99 +0,0 @@
import cv2
import time
import queue
import yaml
import numpy as np
from flask import Flask, Response, make_response
import paho.mqtt.client as mqtt

from frigate.video import Camera
from frigate.object_detection import PreppedQueueProcessor

with open('/config/config.yml') as f:
    CONFIG = yaml.safe_load(f)

MQTT_HOST = CONFIG['mqtt']['host']
MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
MQTT_USER = CONFIG.get('mqtt', {}).get('user')
MQTT_PASS = CONFIG.get('mqtt', {}).get('password')

WEB_PORT = CONFIG.get('web_port', 5000)
DEBUG = (CONFIG.get('debug', '0') == '1')

def main():
    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print ("MQTT Server unavailable")
            elif rc == 4:
                print ("MQTT Bad username or password")
            elif rc == 5:
                print ("MQTT Not authorized")
            else:
                print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
    client = mqtt.Client(client_id="frigate")
    client.on_connect = on_connect
    client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
    if not MQTT_USER is None:
        client.username_pw_set(MQTT_USER, password=MQTT_PASS)
    client.connect(MQTT_HOST, MQTT_PORT, 60)
    client.loop_start()

    # Queue for prepped frames, max size set to (number of cameras * 5)
    max_queue_size = len(CONFIG['cameras'].items())*5
    prepped_frame_queue = queue.Queue(max_queue_size)

    cameras = {}
    for name, config in CONFIG['cameras'].items():
        cameras[name] = Camera(name, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)

    prepped_queue_processor = PreppedQueueProcessor(
        cameras,
        prepped_frame_queue
    )
    prepped_queue_processor.start()

    for name, camera in cameras.items():
        camera.start()
        print("Capture process for {}: {}".format(name, camera.get_capture_pid()))

    # create a flask app that encodes frames a mjpeg on demand
    app = Flask(__name__)

    @app.route('/<camera_name>/best_person.jpg')
    def best_person(camera_name):
        best_person_frame = cameras[camera_name].get_best_person()
        if best_person_frame is None:
            best_person_frame = np.zeros((720,1280,3), np.uint8)
        ret, jpg = cv2.imencode('.jpg', best_person_frame)
        response = make_response(jpg.tobytes())
        response.headers['Content-Type'] = 'image/jpg'
        return response

    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        # return a multipart response
        return Response(imagestream(camera_name),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    def imagestream(camera_name):
        while True:
            # max out at 5 FPS
            time.sleep(0.2)
            frame = cameras[camera_name].get_current_frame_with_objects()
            # encode the image into a jpg
            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    camera.join()

if __name__ == '__main__':
    main()
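The removed script above served each camera as an MJPEG stream at /<camera_name> using a multipart/x-mixed-replace response capped at roughly 5 FPS. For reference, such a stream can usually be consumed directly with OpenCV, assuming a build with ffmpeg or gstreamer support; the host, port, and camera name below are placeholders:

# Illustrative consumer for the MJPEG endpoint exposed by the script above.
import cv2

cap = cv2.VideoCapture("http://frigate-host:5000/back")  # /<camera_name> route
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("back", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()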
BIN
diagram.png
Before Width: | Height: | Size: 283 KiB |
22
docker/Dockerfile.aarch64
Normal file
@@ -0,0 +1,22 @@
FROM frigate-base
LABEL maintainer "blakeb@blakeshome.com"

ENV DEBIAN_FRONTEND=noninteractive
# Install packages for apt repo
RUN apt-get -qq update \
    && apt-get -qq install --no-install-recommends -y \
    # ffmpeg runtime dependencies
    libgomp1 \
    # runtime dependencies
    libopenexr24 \
    libgstreamer1.0-0 \
    libgstreamer-plugins-base1.0-0 \
    libopenblas-base \
    libjpeg-turbo8 \
    libpng16-16 \
    libtiff5 \
    libdc1394-22 \
    ## Tensorflow lite
    && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_aarch64.whl \
    && rm -rf /var/lib/apt/lists/* \
    && (apt-get autoremove -y; apt-get autoclean -y)
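This image installs the tflite_runtime 2.5.0 wheel, and the base image later in this diff provides libedgetpu1-max plus the compiled model at /edgetpu_model.tflite. A minimal sketch of how that runtime is typically driven with the EdgeTPU delegate, using the standard tflite_runtime API rather than frigate's own wrapper classes; the paths and the 300x300 tensor shape assume the model fetched in Dockerfile.base, and a Coral device must be attached:

# Illustrative use of the tflite_runtime wheel installed above, with the EdgeTPU delegate.
import numpy as np
from tflite_runtime.interpreter import Interpreter, load_delegate

interpreter = Interpreter(
    model_path="/edgetpu_model.tflite",
    experimental_delegates=[load_delegate("libedgetpu.so.1.0")],
)
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# 300x300x3 uint8 input, matching the SSDLite MobileDet model fetched in Dockerfile.base
frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)
interpreter.set_tensor(input_details[0]["index"], frame)
interpreter.invoke()
boxes = interpreter.get_tensor(output_details[0]["index"])
print(boxes.shape)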
18
docker/Dockerfile.amd64
Normal file
@@ -0,0 +1,18 @@
FROM frigate-base
LABEL maintainer "blakeb@blakeshome.com"

# By default, use the i965 driver
ENV LIBVA_DRIVER_NAME=i965
# Install packages for apt repo
RUN apt-get -qq update \
    && apt-get -qq install --no-install-recommends -y \
    # ffmpeg dependencies
    libgomp1 \
    # VAAPI drivers for Intel hardware accel
    libva-drm2 libva2 libmfx1 i965-va-driver vainfo intel-media-va-driver mesa-va-drivers \
    ## Tensorflow lite
    && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && rm -rf /var/lib/apt/lists/* \
    && (apt-get autoremove -y; apt-get autoclean -y)
47
docker/Dockerfile.amd64nvidia
Normal file
@@ -0,0 +1,47 @@
FROM frigate-base
LABEL maintainer "blakeb@blakeshome.com"

# Install packages for apt repo
RUN apt-get -qq update \
    && apt-get -qq install --no-install-recommends -y \
    # ffmpeg dependencies
    libgomp1 \
    ## Tensorflow lite
    && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
    && rm -rf /var/lib/apt/lists/* \
    && (apt-get autoremove -y; apt-get autoclean -y)


# nvidia layer (see https://gitlab.com/nvidia/container-images/cuda/blob/master/dist/11.1/ubuntu20.04-x86_64/base/Dockerfile)
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video

RUN apt-get update && apt-get install -y --no-install-recommends \
    gnupg2 curl ca-certificates && \
    curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub | apt-key add - && \
    echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \
    echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu2004/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && \
    apt-get purge --autoremove -y curl \
    && rm -rf /var/lib/apt/lists/*

ENV CUDA_VERSION 11.1.1

# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
RUN apt-get update && apt-get install -y --no-install-recommends \
    cuda-cudart-11-1=11.1.74-1 \
    cuda-compat-11-1 \
    && ln -s cuda-11.1 /usr/local/cuda && \
    rm -rf /var/lib/apt/lists/*

# Required for nvidia-docker v1
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
    echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf

ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64

# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
ENV NVIDIA_REQUIRE_CUDA "cuda>=11.1 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 brand=tesla,driver>=450,driver<451"
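The trailing ENV block above is what the nvidia-container-runtime inspects in order to inject the host driver, nvidia-smi, and the requested CUDA capabilities into the container. A small, purely illustrative check that the injection actually happened; nothing here is added to the image itself, and nvidia-smi comes from the host driver injection, not from this Dockerfile:

# Illustrative in-container check for the NVIDIA runtime variables set above.
import os
import shutil
import subprocess

print("NVIDIA_VISIBLE_DEVICES =", os.environ.get("NVIDIA_VISIBLE_DEVICES"))
print("NVIDIA_DRIVER_CAPABILITIES =", os.environ.get("NVIDIA_DRIVER_CAPABILITIES"))

if shutil.which("nvidia-smi"):
    subprocess.run(["nvidia-smi", "-L"], check=False)  # list GPUs visible to the container
else:
    print("nvidia-smi not found; the container was probably not started with the nvidia runtime")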
24
docker/Dockerfile.armv7
Normal file
@@ -0,0 +1,24 @@
FROM frigate-base
LABEL maintainer "blakeb@blakeshome.com"

ENV DEBIAN_FRONTEND=noninteractive
# Install packages for apt repo
RUN apt-get -qq update \
    && apt-get -qq install --no-install-recommends -y \
    # ffmpeg runtime dependencies
    libgomp1 \
    # runtime dependencies
    libopenexr24 \
    libgstreamer1.0-0 \
    libgstreamer-plugins-base1.0-0 \
    libopenblas-base \
    libjpeg-turbo8 \
    libpng16-16 \
    libtiff5 \
    libdc1394-22 \
    libaom0 \
    libx265-179 \
    ## Tensorflow lite
    && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_armv7l.whl \
    && rm -rf /var/lib/apt/lists/* \
    && (apt-get autoremove -y; apt-get autoclean -y)
58
docker/Dockerfile.base
Normal file
@@ -0,0 +1,58 @@
ARG ARCH=amd64
ARG WHEELS_VERSION
ARG FFMPEG_VERSION
FROM blakeblackshear/frigate-wheels:${WHEELS_VERSION}-${ARCH} as wheels
FROM blakeblackshear/frigate-ffmpeg:${FFMPEG_VERSION}-${ARCH} as ffmpeg
FROM frigate-web as web

FROM ubuntu:20.04
LABEL maintainer "blakeb@blakeshome.com"

COPY --from=ffmpeg /usr/local /usr/local/

COPY --from=wheels /wheels/. /wheels/

ENV FLASK_ENV=development
# ENV FONTCONFIG_PATH=/etc/fonts
ENV DEBIAN_FRONTEND=noninteractive
# Install packages for apt repo
RUN apt-get -qq update \
    && apt-get upgrade -y \
    && apt-get -qq install --no-install-recommends -y \
    gnupg wget unzip tzdata nginx libnginx-mod-rtmp \
    && apt-get -qq install --no-install-recommends -y \
    python3-pip \
    && pip3 install -U /wheels/*.whl \
    && APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn apt-key adv --fetch-keys https://packages.cloud.google.com/apt/doc/apt-key.gpg \
    && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
    && echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections \
    && apt-get -qq update && apt-get -qq install --no-install-recommends -y \
    libedgetpu1-max=15.0 \
    && rm -rf /var/lib/apt/lists/* /wheels \
    && (apt-get autoremove -y; apt-get autoclean -y)

RUN pip3 install \
    peewee_migrate \
    zeroconf \
    voluptuous

COPY nginx/nginx.conf /etc/nginx/nginx.conf

# get model and labels
COPY labelmap.txt /labelmap.txt
RUN wget -q https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -O /edgetpu_model.tflite
RUN wget -q https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess.tflite -O /cpu_model.tflite

WORKDIR /opt/frigate/
ADD frigate frigate/
ADD migrations migrations/

COPY --from=web /opt/frigate/build web/

COPY run.sh /run.sh
RUN chmod +x /run.sh

EXPOSE 5000
EXPOSE 1935

CMD ["/run.sh"]
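Besides the two models, the base image copies labelmap.txt to the image root. Coral label files pair a numeric class id with a name on each line; a hedged, illustrative parser in the spirit of frigate.edgetpu.load_labels (not necessarily identical to it) could look like this:

# Illustrative parser for the /labelmap.txt shipped in the base image above.
def load_labels(path, encoding="utf-8"):
    with open(path, "r", encoding=encoding) as f:
        lines = f.readlines()
    labels = {}
    for row, content in enumerate(lines):
        pair = content.strip().split(maxsplit=1)
        if len(pair) == 2 and pair[0].isdigit():
            labels[int(pair[0])] = pair[1].strip()   # "<id> <name>" format
        else:
            labels[row] = content.strip()            # fall back to line-number indexing
    return labels

# labels = load_labels("/labelmap.txt")
# print(labels.get(0))  # typically "person" for the coco labelmap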
474
docker/Dockerfile.ffmpeg.aarch64
Normal file
@@ -0,0 +1,474 @@
# inspired by:
# https://github.com/collelog/ffmpeg/blob/master/4.3.1-alpine-rpi4-arm64v8.Dockerfile
# https://github.com/mmastrac/ffmpeg-omx-rpi-docker/blob/master/Dockerfile
# https://github.com/jrottenberg/ffmpeg/pull/158/files
# https://github.com/jrottenberg/ffmpeg/pull/239
FROM ubuntu:20.04 AS base

WORKDIR /tmp/workdir

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get -yqq update && \
    apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
    apt-get autoremove -y && \
    apt-get clean -y

FROM base as build

ENV FFMPEG_VERSION=4.3.1 \
    AOM_VERSION=v1.0.0 \
    FDKAAC_VERSION=0.1.5 \
    FREETYPE_VERSION=2.5.5 \
    FRIBIDI_VERSION=0.19.7 \
    KVAZAAR_VERSION=1.2.0 \
    LAME_VERSION=3.100 \
    LIBPTHREAD_STUBS_VERSION=0.4 \
    LIBVIDSTAB_VERSION=1.1.0 \
    LIBXCB_VERSION=1.13.1 \
    XCBPROTO_VERSION=1.13 \
    OGG_VERSION=1.3.2 \
    OPENCOREAMR_VERSION=0.1.5 \
    OPUS_VERSION=1.2 \
    OPENJPEG_VERSION=2.1.2 \
    THEORA_VERSION=1.1.1 \
    VORBIS_VERSION=1.3.5 \
    VPX_VERSION=1.8.0 \
    WEBP_VERSION=1.0.2 \
    X264_VERSION=20170226-2245-stable \
    X265_VERSION=3.1.1 \
    XAU_VERSION=1.0.9 \
    XORG_MACROS_VERSION=1.19.2 \
    XPROTO_VERSION=7.0.31 \
    XVID_VERSION=1.3.4 \
    LIBZMQ_VERSION=4.3.2 \
    SRC=/usr/local

ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"


ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
ARG MAKEFLAGS="-j2"
ARG PKG_CONFIG_PATH="/opt/ffmpeg/share/pkgconfig:/opt/ffmpeg/lib/pkgconfig:/opt/ffmpeg/lib64/pkgconfig"
ARG PREFIX=/opt/ffmpeg
ARG LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64:/usr/lib64:/usr/lib:/lib64:/lib"


RUN buildDeps="autoconf \
    automake \
    cmake \
    curl \
    bzip2 \
    libexpat1-dev \
    g++ \
    gcc \
    git \
    gperf \
    libtool \
    make \
    nasm \
    perl \
    pkg-config \
    python \
    libssl-dev \
    yasm \
    linux-headers-raspi2 \
    libomxil-bellagio-dev \
    zlib1g-dev" && \
    apt-get -yqq update && \
    apt-get install -yq --no-install-recommends ${buildDeps}
## opencore-amr https://sourceforge.net/projects/opencore-amr/
RUN \
    DIR=/tmp/opencore-amr && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://versaweb.dl.sourceforge.net/project/opencore-amr/opencore-amr/opencore-amr-${OPENCOREAMR_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
## x264 http://www.videolan.org/developers/x264.html
RUN \
    DIR=/tmp/x264 && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-${X264_VERSION}.tar.bz2 | \
    tar -jx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared --enable-pic --disable-cli && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### x265 http://x265.org/
RUN \
    DIR=/tmp/x265 && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://download.videolan.org/pub/videolan/x265/x265_${X265_VERSION}.tar.gz | \
    tar -zx && \
    cd x265_${X265_VERSION}/build/linux && \
    sed -i "/-DEXTRA_LIB/ s/$/ -DCMAKE_INSTALL_PREFIX=\${PREFIX}/" multilib.sh && \
    sed -i "/^cmake/ s/$/ -DENABLE_CLI=OFF/" multilib.sh && \
    export CXXFLAGS="${CXXFLAGS} -fPIC" && \
    ./multilib.sh && \
    make -C 8bit install && \
    rm -rf ${DIR}
### libogg https://www.xiph.org/ogg/
RUN \
    DIR=/tmp/ogg && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/ogg/libogg-${OGG_VERSION}.tar.gz && \
    echo ${OGG_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libogg-${OGG_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libopus https://www.opus-codec.org/
RUN \
    DIR=/tmp/opus && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
    echo ${OPUS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz && \
    autoreconf -fiv && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libvorbis https://xiph.org/vorbis/
RUN \
    DIR=/tmp/vorbis && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/vorbis/libvorbis-${VORBIS_VERSION}.tar.gz && \
    echo ${VORBIS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libvorbis-${VORBIS_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libtheora http://www.theora.org/
RUN \
    DIR=/tmp/theora && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/theora/libtheora-${THEORA_VERSION}.tar.gz && \
    echo ${THEORA_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libtheora-${THEORA_VERSION}.tar.gz && \
    curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess && \
    curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub && \
    ./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libvpx https://www.webmproject.org/code/
RUN \
    DIR=/tmp/vpx && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://codeload.github.com/webmproject/libvpx/tar.gz/v${VPX_VERSION} | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-vp8 --enable-vp9 --enable-vp9-highbitdepth --enable-pic --enable-shared \
    --disable-debug --disable-examples --disable-docs --disable-install-bins && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libwebp https://developers.google.com/speed/webp/
RUN \
    DIR=/tmp/vebp && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-${WEBP_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### libmp3lame http://lame.sourceforge.net/
RUN \
    DIR=/tmp/lame && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://versaweb.dl.sourceforge.net/project/lame/lame/$(echo ${LAME_VERSION} | sed -e 's/[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)/\1.\2/')/lame-${LAME_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" --enable-shared --enable-nasm --disable-frontend && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### xvid https://www.xvid.com/
RUN \
    DIR=/tmp/xvid && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xvid.org/downloads/xvidcore-${XVID_VERSION}.tar.gz && \
    echo ${XVID_SHA256SUM} | sha256sum --check && \
    tar -zx -f xvidcore-${XVID_VERSION}.tar.gz && \
    cd xvidcore/build/generic && \
    ./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
### fdk-aac https://github.com/mstorsjo/fdk-aac
RUN \
    DIR=/tmp/fdk-aac && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    autoreconf -fiv && \
    ./configure --prefix="${PREFIX}" --enable-shared --datadir="${DIR}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
## openjpeg https://github.com/uclouvain/openjpeg
RUN \
    DIR=/tmp/openjpeg && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://github.com/uclouvain/openjpeg/archive/v${OPENJPEG_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    export CFLAGS="${CFLAGS} -DPNG_ARM_NEON_OPT=0" && \
    cmake -DBUILD_THIRDPARTY:BOOL=ON -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
## freetype https://www.freetype.org/
RUN \
    DIR=/tmp/freetype && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://download.savannah.gnu.org/releases/freetype/freetype-${FREETYPE_VERSION}.tar.gz && \
    echo ${FREETYPE_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f freetype-${FREETYPE_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
## libvstab https://github.com/georgmartius/vid.stab
RUN \
    DIR=/tmp/vid.stab && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/georgmartius/vid.stab/archive/v${LIBVIDSTAB_VERSION}.tar.gz && \
    echo ${LIBVIDSTAB_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f v${LIBVIDSTAB_VERSION}.tar.gz && \
    cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}
## fridibi https://www.fribidi.org/
RUN \
    DIR=/tmp/fribidi && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
    echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz && \
    sed -i 's/^SUBDIRS =.*/SUBDIRS=gen.tab charset lib bin/' Makefile.am && \
    ./bootstrap --no-config --auto && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make -j1 && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

## kvazaar https://github.com/ultravideo/kvazaar
RUN \
    DIR=/tmp/kvazaar && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz && \
    ./autogen.sh && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/aom && \
    git clone --branch ${AOM_VERSION} --depth 1 https://aomedia.googlesource.com/aom ${DIR} ; \
    cd ${DIR} ; \
    rm -rf CMakeCache.txt CMakeFiles ; \
    mkdir -p ./aom_build ; \
    cd ./aom_build ; \
    cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=1 ..; \
    make ; \
    make install ; \
    rm -rf ${DIR}

## libxcb (and supporting libraries) for screen capture https://xcb.freedesktop.org/
RUN \
    DIR=/tmp/xorg-macros && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive//individual/util/util-macros-${XORG_MACROS_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f util-macros-${XORG_MACROS_VERSION}.tar.gz && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/xproto && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive/individual/proto/xproto-${XPROTO_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f xproto-${XPROTO_VERSION}.tar.gz && \
    curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess && \
    curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libXau && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive/individual/lib/libXau-${XAU_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f libXau-${XAU_VERSION}.tar.gz && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libpthread-stubs && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://xcb.freedesktop.org/dist/libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libxcb-proto && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://xcb.freedesktop.org/dist/xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
    ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
    ./configure --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libxcb && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://xcb.freedesktop.org/dist/libxcb-${LIBXCB_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f libxcb-${LIBXCB_VERSION}.tar.gz && \
    ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

## libzmq https://github.com/zeromq/libzmq/
RUN \
    DIR=/tmp/libzmq && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
    echo ${LIBZMQ_SHA256SUM} | sha256sum --check && \
    tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz && \
    ./autogen.sh && \
    ./configure --prefix="${PREFIX}" && \
    make -j $(nproc) && \
    make check && \
    make -j $(nproc) install && \
    rm -rf ${DIR}

## ffmpeg https://ffmpeg.org/
RUN \
    DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
    curl -sLO https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2 && \
    tar -jx --strip-components=1 -f ffmpeg-${FFMPEG_VERSION}.tar.bz2

RUN \
    DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
    ./configure \
    --disable-debug \
    --disable-doc \
    --disable-ffplay \
    --enable-shared \
    --enable-avresample \
    --enable-libopencore-amrnb \
    --enable-libopencore-amrwb \
    --enable-gpl \
    --enable-libfreetype \
    --enable-libvidstab \
    --enable-libmp3lame \
    --enable-libopus \
    --enable-libtheora \
    --enable-libvorbis \
    --enable-libvpx \
    --enable-libwebp \
    --enable-libxcb \
    --enable-libx265 \
    --enable-libxvid \
    --enable-libx264 \
    --enable-nonfree \
    --enable-openssl \
    --enable-libfdk_aac \
    --enable-postproc \
    --enable-small \
    --enable-version3 \
    --enable-libzmq \
    --extra-libs=-ldl \
    --prefix="${PREFIX}" \
    --enable-libopenjpeg \
    --enable-libkvazaar \
    --enable-libaom \
    --extra-libs=-lpthread \
    # --enable-omx \
    # --enable-omx-rpi \
    # --enable-mmal \
    --enable-v4l2_m2m \
    --enable-neon \
    --extra-cflags="-I${PREFIX}/include" \
    --extra-ldflags="-L${PREFIX}/lib" && \
    make -j $(nproc) && \
    make -j $(nproc) install && \
    make tools/zmqsend && cp tools/zmqsend ${PREFIX}/bin/ && \
    make distclean && \
    hash -r && \
    cd tools && \
    make qt-faststart && cp qt-faststart ${PREFIX}/bin/

## cleanup
RUN \
    ldd ${PREFIX}/bin/ffmpeg | grep opt/ffmpeg | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
    for lib in /usr/local/lib/*.so.*; do ln -s "${lib##*/}" "${lib%%.so.*}".so; done && \
    cp ${PREFIX}/bin/* /usr/local/bin/ && \
    cp -r ${PREFIX}/share/ffmpeg /usr/local/share/ && \
    LD_LIBRARY_PATH=/usr/local/lib ffmpeg -buildconf && \
    cp -r ${PREFIX}/include/libav* ${PREFIX}/include/libpostproc ${PREFIX}/include/libsw* /usr/local/include && \
    mkdir -p /usr/local/lib/pkgconfig && \
    for pc in ${PREFIX}/lib/pkgconfig/libav*.pc ${PREFIX}/lib/pkgconfig/libpostproc.pc ${PREFIX}/lib/pkgconfig/libsw*.pc; do \
    sed "s:${PREFIX}:/usr/local:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
    done

FROM base AS release

ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:/usr/lib:/usr/lib64:/lib:/lib64

CMD ["--help"]
ENTRYPOINT ["ffmpeg"]

COPY --from=build /usr/local /usr/local/

# Run ffmpeg with -c:v h264_v4l2m2m to enable HW accell for decoding on raspberry pi4 64-bit
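The closing comment notes that this build supports -c:v h264_v4l2m2m for hardware H.264 decoding on a 64-bit Raspberry Pi 4. As an illustration of how such an ffmpeg binary is typically driven from Python, the sketch below decodes an RTSP stream into raw yuv420p frames on stdout; the URL, resolution, and decoder choice are placeholders, and the hardware decoder flag can be dropped for software decoding:

# Illustrative ffmpeg pipeline: decode RTSP to raw frames on stdout.
import subprocess as sp

WIDTH, HEIGHT = 1280, 720
FRAME_BYTES = WIDTH * HEIGHT * 3 // 2  # yuv420p = 1.5 bytes per pixel

cmd = [
    "ffmpeg",
    "-c:v", "h264_v4l2m2m",  # hardware decode on the Pi 4; remove for software decode
    "-i", "rtsp://viewer:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2",
    "-f", "rawvideo",
    "-pix_fmt", "yuv420p",
    "pipe:",
]

proc = sp.Popen(cmd, stdout=sp.PIPE, bufsize=FRAME_BYTES)
while True:
    frame = proc.stdout.read(FRAME_BYTES)
    if len(frame) < FRAME_BYTES:
        break
    # ... hand the raw frame to the detection pipeline ...
proc.terminate()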
468
docker/Dockerfile.ffmpeg.amd64
Normal file
@@ -0,0 +1,468 @@
# inspired by:
# https://github.com/collelog/ffmpeg/blob/master/4.3.1-alpine-rpi4-arm64v8.Dockerfile
# https://github.com/jrottenberg/ffmpeg/pull/158/files
# https://github.com/jrottenberg/ffmpeg/pull/239
FROM ubuntu:20.04 AS base

WORKDIR /tmp/workdir

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get -yqq update && \
    apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
    apt-get autoremove -y && \
    apt-get clean -y

FROM base as build

ENV FFMPEG_VERSION=4.3.1 \
    AOM_VERSION=v1.0.0 \
    FDKAAC_VERSION=0.1.5 \
    FREETYPE_VERSION=2.5.5 \
    FRIBIDI_VERSION=0.19.7 \
    KVAZAAR_VERSION=1.2.0 \
    LAME_VERSION=3.100 \
    LIBPTHREAD_STUBS_VERSION=0.4 \
    LIBVIDSTAB_VERSION=1.1.0 \
    LIBXCB_VERSION=1.13.1 \
    XCBPROTO_VERSION=1.13 \
    OGG_VERSION=1.3.2 \
    OPENCOREAMR_VERSION=0.1.5 \
    OPUS_VERSION=1.2 \
    OPENJPEG_VERSION=2.1.2 \
    THEORA_VERSION=1.1.1 \
    VORBIS_VERSION=1.3.5 \
    VPX_VERSION=1.8.0 \
    WEBP_VERSION=1.0.2 \
    X264_VERSION=20170226-2245-stable \
    X265_VERSION=3.1.1 \
    XAU_VERSION=1.0.9 \
    XORG_MACROS_VERSION=1.19.2 \
    XPROTO_VERSION=7.0.31 \
    XVID_VERSION=1.3.4 \
    LIBZMQ_VERSION=4.3.2 \
    SRC=/usr/local

ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"


ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
ARG MAKEFLAGS="-j2"
ARG PKG_CONFIG_PATH="/opt/ffmpeg/share/pkgconfig:/opt/ffmpeg/lib/pkgconfig:/opt/ffmpeg/lib64/pkgconfig"
ARG PREFIX=/opt/ffmpeg
ARG LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64:/usr/lib64:/usr/lib:/lib64:/lib"


RUN buildDeps="autoconf \
    automake \
    cmake \
    curl \
    bzip2 \
    libexpat1-dev \
    g++ \
    gcc \
    git \
    gperf \
    libtool \
    make \
    nasm \
    perl \
    pkg-config \
    python \
    libssl-dev \
    yasm \
    libva-dev \
    libmfx-dev \
    zlib1g-dev" && \
    apt-get -yqq update && \
    apt-get install -yq --no-install-recommends ${buildDeps}
## opencore-amr https://sourceforge.net/projects/opencore-amr/
RUN \
    DIR=/tmp/opencore-amr && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://versaweb.dl.sourceforge.net/project/opencore-amr/opencore-amr/opencore-amr-${OPENCOREAMR_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
## x264 http://www.videolan.org/developers/x264.html
RUN \
    DIR=/tmp/x264 && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-${X264_VERSION}.tar.bz2 | \
    tar -jx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared --enable-pic --disable-cli && \
    make && \
    make install && \
    rm -rf ${DIR}
### x265 http://x265.org/
RUN \
    DIR=/tmp/x265 && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://download.videolan.org/pub/videolan/x265/x265_${X265_VERSION}.tar.gz | \
    tar -zx && \
    cd x265_${X265_VERSION}/build/linux && \
    sed -i "/-DEXTRA_LIB/ s/$/ -DCMAKE_INSTALL_PREFIX=\${PREFIX}/" multilib.sh && \
    sed -i "/^cmake/ s/$/ -DENABLE_CLI=OFF/" multilib.sh && \
    ./multilib.sh && \
    make -C 8bit install && \
    rm -rf ${DIR}
### libogg https://www.xiph.org/ogg/
RUN \
    DIR=/tmp/ogg && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/ogg/libogg-${OGG_VERSION}.tar.gz && \
    echo ${OGG_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libogg-${OGG_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
### libopus https://www.opus-codec.org/
RUN \
    DIR=/tmp/opus && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
    echo ${OPUS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz && \
    autoreconf -fiv && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
### libvorbis https://xiph.org/vorbis/
RUN \
    DIR=/tmp/vorbis && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/vorbis/libvorbis-${VORBIS_VERSION}.tar.gz && \
    echo ${VORBIS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libvorbis-${VORBIS_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
### libtheora http://www.theora.org/
RUN \
    DIR=/tmp/theora && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xiph.org/releases/theora/libtheora-${THEORA_VERSION}.tar.gz && \
    echo ${THEORA_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f libtheora-${THEORA_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
### libvpx https://www.webmproject.org/code/
RUN \
    DIR=/tmp/vpx && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://codeload.github.com/webmproject/libvpx/tar.gz/v${VPX_VERSION} | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-vp8 --enable-vp9 --enable-vp9-highbitdepth --enable-pic --enable-shared \
    --disable-debug --disable-examples --disable-docs --disable-install-bins && \
    make && \
    make install && \
    rm -rf ${DIR}
### libwebp https://developers.google.com/speed/webp/
RUN \
    DIR=/tmp/vebp && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-${WEBP_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
### libmp3lame http://lame.sourceforge.net/
RUN \
    DIR=/tmp/lame && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://versaweb.dl.sourceforge.net/project/lame/lame/$(echo ${LAME_VERSION} | sed -e 's/[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)/\1.\2/')/lame-${LAME_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    ./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" --enable-shared --enable-nasm --disable-frontend && \
    make && \
    make install && \
    rm -rf ${DIR}
### xvid https://www.xvid.com/
RUN \
    DIR=/tmp/xvid && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO http://downloads.xvid.org/downloads/xvidcore-${XVID_VERSION}.tar.gz && \
    echo ${XVID_SHA256SUM} | sha256sum --check && \
    tar -zx -f xvidcore-${XVID_VERSION}.tar.gz && \
    cd xvidcore/build/generic && \
    ./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" && \
    make && \
    make install && \
    rm -rf ${DIR}
### fdk-aac https://github.com/mstorsjo/fdk-aac
RUN \
    DIR=/tmp/fdk-aac && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    autoreconf -fiv && \
    ./configure --prefix="${PREFIX}" --enable-shared --datadir="${DIR}" && \
    make && \
    make install && \
    rm -rf ${DIR}
## openjpeg https://github.com/uclouvain/openjpeg
RUN \
    DIR=/tmp/openjpeg && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sL https://github.com/uclouvain/openjpeg/archive/v${OPENJPEG_VERSION}.tar.gz | \
    tar -zx --strip-components=1 && \
    cmake -DBUILD_THIRDPARTY:BOOL=ON -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
    make && \
    make install && \
    rm -rf ${DIR}
## freetype https://www.freetype.org/
RUN \
    DIR=/tmp/freetype && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://download.savannah.gnu.org/releases/freetype/freetype-${FREETYPE_VERSION}.tar.gz && \
    echo ${FREETYPE_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f freetype-${FREETYPE_VERSION}.tar.gz && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}
## libvstab https://github.com/georgmartius/vid.stab
RUN \
    DIR=/tmp/vid.stab && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/georgmartius/vid.stab/archive/v${LIBVIDSTAB_VERSION}.tar.gz && \
    echo ${LIBVIDSTAB_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f v${LIBVIDSTAB_VERSION}.tar.gz && \
    cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
    make && \
    make install && \
    rm -rf ${DIR}
## fridibi https://www.fribidi.org/
RUN \
    DIR=/tmp/fribidi && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
    echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz && \
    sed -i 's/^SUBDIRS =.*/SUBDIRS=gen.tab charset lib bin/' Makefile.am && \
    ./bootstrap --no-config --auto && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make -j1 && \
    make install && \
    rm -rf ${DIR}
## kvazaar https://github.com/ultravideo/kvazaar
RUN \
    DIR=/tmp/kvazaar && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz && \
    ./autogen.sh && \
    ./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
    make && \
    make install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/aom && \
    git clone --branch ${AOM_VERSION} --depth 1 https://aomedia.googlesource.com/aom ${DIR} ; \
    cd ${DIR} ; \
    rm -rf CMakeCache.txt CMakeFiles ; \
    mkdir -p ./aom_build ; \
    cd ./aom_build ; \
    cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=1 ..; \
    make ; \
    make install ; \
    rm -rf ${DIR}

## libxcb (and supporting libraries) for screen capture https://xcb.freedesktop.org/
RUN \
    DIR=/tmp/xorg-macros && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive//individual/util/util-macros-${XORG_MACROS_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f util-macros-${XORG_MACROS_VERSION}.tar.gz && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make && \
    make install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/xproto && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive/individual/proto/xproto-${XPROTO_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f xproto-${XPROTO_VERSION}.tar.gz && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make && \
    make install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libXau && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://www.x.org/archive/individual/lib/libXau-${XAU_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f libXau-${XAU_VERSION}.tar.gz && \
    ./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
    make && \
    make install && \
    rm -rf ${DIR}

RUN \
    DIR=/tmp/libpthread-stubs && \
    mkdir -p ${DIR} && \
    cd ${DIR} && \
    curl -sLO https://xcb.freedesktop.org/dist/libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libxcb-proto && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://xcb.freedesktop.org/dist/xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
|
||||||
|
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libxcb && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://xcb.freedesktop.org/dist/libxcb-${LIBXCB_VERSION}.tar.gz && \
|
||||||
|
tar -zx --strip-components=1 -f libxcb-${LIBXCB_VERSION}.tar.gz && \
|
||||||
|
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## libzmq https://github.com/zeromq/libzmq/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/libzmq && \
|
||||||
|
mkdir -p ${DIR} && \
|
||||||
|
cd ${DIR} && \
|
||||||
|
curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
|
||||||
|
echo ${LIBZMQ_SHA256SUM} | sha256sum --check && \
|
||||||
|
tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --prefix="${PREFIX}" && \
|
||||||
|
make && \
|
||||||
|
make check && \
|
||||||
|
make install && \
|
||||||
|
rm -rf ${DIR}
|
||||||
|
|
||||||
|
## ffmpeg https://ffmpeg.org/
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
|
||||||
|
curl -sLO https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2 && \
|
||||||
|
tar -jx --strip-components=1 -f ffmpeg-${FFMPEG_VERSION}.tar.bz2
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
|
||||||
|
./configure \
|
||||||
|
--disable-debug \
|
||||||
|
--disable-doc \
|
||||||
|
--disable-ffplay \
|
||||||
|
--enable-shared \
|
||||||
|
--enable-avresample \
|
||||||
|
--enable-libopencore-amrnb \
|
||||||
|
--enable-libopencore-amrwb \
|
||||||
|
--enable-gpl \
|
||||||
|
--enable-libfreetype \
|
||||||
|
--enable-libvidstab \
|
||||||
|
--enable-libmfx \
|
||||||
|
--enable-libmp3lame \
|
||||||
|
--enable-libopus \
|
||||||
|
--enable-libtheora \
|
||||||
|
--enable-libvorbis \
|
||||||
|
--enable-libvpx \
|
||||||
|
--enable-libwebp \
|
||||||
|
--enable-libxcb \
|
||||||
|
--enable-libx265 \
|
||||||
|
--enable-libxvid \
|
||||||
|
--enable-libx264 \
|
||||||
|
--enable-nonfree \
|
||||||
|
--enable-openssl \
|
||||||
|
--enable-libfdk_aac \
|
||||||
|
--enable-postproc \
|
||||||
|
--enable-small \
|
||||||
|
--enable-version3 \
|
||||||
|
--enable-libzmq \
|
||||||
|
--extra-libs=-ldl \
|
||||||
|
--prefix="${PREFIX}" \
|
||||||
|
--enable-libopenjpeg \
|
||||||
|
--enable-libkvazaar \
|
||||||
|
--enable-libaom \
|
||||||
|
--extra-libs=-lpthread \
|
||||||
|
--enable-vaapi \
|
||||||
|
--extra-cflags="-I${PREFIX}/include" \
|
||||||
|
--extra-ldflags="-L${PREFIX}/lib" && \
|
||||||
|
make && \
|
||||||
|
make install && \
|
||||||
|
make tools/zmqsend && cp tools/zmqsend ${PREFIX}/bin/ && \
|
||||||
|
make distclean && \
|
||||||
|
hash -r && \
|
||||||
|
cd tools && \
|
||||||
|
make qt-faststart && cp qt-faststart ${PREFIX}/bin/
|
||||||
|
|
||||||
|
## cleanup
|
||||||
|
RUN \
|
||||||
|
ldd ${PREFIX}/bin/ffmpeg | grep opt/ffmpeg | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
|
||||||
|
for lib in /usr/local/lib/*.so.*; do ln -s "${lib##*/}" "${lib%%.so.*}".so; done && \
|
||||||
|
cp ${PREFIX}/bin/* /usr/local/bin/ && \
|
||||||
|
cp -r ${PREFIX}/share/ffmpeg /usr/local/share/ && \
|
||||||
|
LD_LIBRARY_PATH=/usr/local/lib ffmpeg -buildconf && \
|
||||||
|
cp -r ${PREFIX}/include/libav* ${PREFIX}/include/libpostproc ${PREFIX}/include/libsw* /usr/local/include && \
|
||||||
|
mkdir -p /usr/local/lib/pkgconfig && \
|
||||||
|
for pc in ${PREFIX}/lib/pkgconfig/libav*.pc ${PREFIX}/lib/pkgconfig/libpostproc.pc ${PREFIX}/lib/pkgconfig/libsw*.pc; do \
|
||||||
|
sed "s:${PREFIX}:/usr/local:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
|
||||||
|
done
|
||||||
|
|
||||||
|
FROM base AS release
|
||||||
|
|
||||||
|
ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:/usr/lib:/usr/lib64:/lib:/lib64
|
||||||
|
|
||||||
|
CMD ["--help"]
|
||||||
|
ENTRYPOINT ["ffmpeg"]
|
||||||
|
|
||||||
|
COPY --from=build /usr/local /usr/local/
|
||||||
|
|
||||||
|
RUN \
|
||||||
|
apt-get update -y && \
|
||||||
|
apt-get install -y --no-install-recommends libva-drm2 libva2 i965-va-driver mesa-va-drivers && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
docker/Dockerfile.ffmpeg.amd64nvidia (new file, 549 lines)
@@ -0,0 +1,549 @@
# inspired by https://github.com/jrottenberg/ffmpeg/blob/master/docker-images/4.3/ubuntu1804/Dockerfile

# ffmpeg - http://ffmpeg.org/download.html
#
# From https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
#
# https://hub.docker.com/r/jrottenberg/ffmpeg/
#
#

FROM nvidia/cuda:11.1-devel-ubuntu20.04 AS devel-base

ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /tmp/workdir

RUN apt-get -yqq update && \
apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
apt-get autoremove -y && \
apt-get clean -y

FROM nvidia/cuda:11.1-runtime-ubuntu20.04 AS runtime-base

ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /tmp/workdir

RUN apt-get -yqq update && \
apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 libxcb-shape0-dev && \
apt-get autoremove -y && \
apt-get clean -y

FROM devel-base as build

ENV NVIDIA_HEADERS_VERSION=9.1.23.1

ENV FFMPEG_VERSION=4.3.1 \
AOM_VERSION=v1.0.0 \
FDKAAC_VERSION=0.1.5 \
FREETYPE_VERSION=2.5.5 \
FRIBIDI_VERSION=0.19.7 \
KVAZAAR_VERSION=1.2.0 \
LAME_VERSION=3.100 \
LIBPTHREAD_STUBS_VERSION=0.4 \
LIBVIDSTAB_VERSION=1.1.0 \
LIBXCB_VERSION=1.13.1 \
XCBPROTO_VERSION=1.13 \
OGG_VERSION=1.3.2 \
OPENCOREAMR_VERSION=0.1.5 \
OPUS_VERSION=1.2 \
OPENJPEG_VERSION=2.1.2 \
THEORA_VERSION=1.1.1 \
VORBIS_VERSION=1.3.5 \
VPX_VERSION=1.8.0 \
WEBP_VERSION=1.0.2 \
X264_VERSION=20170226-2245-stable \
X265_VERSION=3.1.1 \
XAU_VERSION=1.0.9 \
XORG_MACROS_VERSION=1.19.2 \
XPROTO_VERSION=7.0.31 \
XVID_VERSION=1.3.4 \
LIBZMQ_VERSION=4.3.2 \
LIBSRT_VERSION=1.4.1 \
LIBARIBB24_VERSION=1.0.3 \
LIBPNG_VERSION=1.6.9 \
SRC=/usr/local

ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"
ARG LIBZMQ_SHA256SUM="02ecc88466ae38cf2c8d79f09cfd2675ba299a439680b64ade733e26a349edeb v4.3.2.tar.gz"
ARG LIBARIBB24_SHA256SUM="f61560738926e57f9173510389634d8c06cabedfa857db4b28fb7704707ff128 v1.0.3.tar.gz"

ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
ARG MAKEFLAGS="-j2"
ARG PKG_CONFIG_PATH="/opt/ffmpeg/share/pkgconfig:/opt/ffmpeg/lib/pkgconfig:/opt/ffmpeg/lib64/pkgconfig"
ARG PREFIX=/opt/ffmpeg
ARG LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64"

RUN buildDeps="autoconf \
automake \
cmake \
curl \
bzip2 \
libexpat1-dev \
g++ \
gcc \
git \
gperf \
libtool \
make \
nasm \
perl \
pkg-config \
python \
libssl-dev \
yasm \
zlib1g-dev" && \
apt-get -yqq update && \
apt-get install -yq --no-install-recommends ${buildDeps}

RUN \
DIR=/tmp/nv-codec-headers && \
git clone https://github.com/FFmpeg/nv-codec-headers ${DIR} && \
cd ${DIR} && \
git checkout n${NVIDIA_HEADERS_VERSION} && \
make PREFIX="${PREFIX}" && \
make install PREFIX="${PREFIX}" && \
rm -rf ${DIR}

## opencore-amr https://sourceforge.net/projects/opencore-amr/
RUN \
DIR=/tmp/opencore-amr && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://versaweb.dl.sourceforge.net/project/opencore-amr/opencore-amr/opencore-amr-${OPENCOREAMR_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
## x264 http://www.videolan.org/developers/x264.html
RUN \
DIR=/tmp/x264 && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-${X264_VERSION}.tar.bz2 | \
tar -jx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared --enable-pic --disable-cli && \
make && \
make install && \
rm -rf ${DIR}
### x265 http://x265.org/
RUN \
DIR=/tmp/x265 && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://download.videolan.org/pub/videolan/x265/x265_${X265_VERSION}.tar.gz | \
tar -zx && \
cd x265_${X265_VERSION}/build/linux && \
sed -i "/-DEXTRA_LIB/ s/$/ -DCMAKE_INSTALL_PREFIX=\${PREFIX}/" multilib.sh && \
sed -i "/^cmake/ s/$/ -DENABLE_CLI=OFF/" multilib.sh && \
./multilib.sh && \
make -C 8bit install && \
rm -rf ${DIR}
### libogg https://www.xiph.org/ogg/
RUN \
DIR=/tmp/ogg && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/ogg/libogg-${OGG_VERSION}.tar.gz && \
echo ${OGG_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libogg-${OGG_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
### libopus https://www.opus-codec.org/
RUN \
DIR=/tmp/opus && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
echo ${OPUS_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz && \
autoreconf -fiv && \
./configure --prefix="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
### libvorbis https://xiph.org/vorbis/
RUN \
DIR=/tmp/vorbis && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/vorbis/libvorbis-${VORBIS_VERSION}.tar.gz && \
echo ${VORBIS_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libvorbis-${VORBIS_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
### libtheora http://www.theora.org/
RUN \
DIR=/tmp/theora && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/theora/libtheora-${THEORA_VERSION}.tar.gz && \
echo ${THEORA_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libtheora-${THEORA_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
### libvpx https://www.webmproject.org/code/
RUN \
DIR=/tmp/vpx && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://codeload.github.com/webmproject/libvpx/tar.gz/v${VPX_VERSION} | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-vp8 --enable-vp9 --enable-vp9-highbitdepth --enable-pic --enable-shared \
--disable-debug --disable-examples --disable-docs --disable-install-bins && \
make && \
make install && \
rm -rf ${DIR}
### libwebp https://developers.google.com/speed/webp/
RUN \
DIR=/tmp/vebp && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-${WEBP_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
### libmp3lame http://lame.sourceforge.net/
RUN \
DIR=/tmp/lame && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://versaweb.dl.sourceforge.net/project/lame/lame/$(echo ${LAME_VERSION} | sed -e 's/[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)/\1.\2/')/lame-${LAME_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" --enable-shared --enable-nasm --disable-frontend && \
make && \
make install && \
rm -rf ${DIR}
### xvid https://www.xvid.com/
RUN \
DIR=/tmp/xvid && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xvid.org/downloads/xvidcore-${XVID_VERSION}.tar.gz && \
echo ${XVID_SHA256SUM} | sha256sum --check && \
tar -zx -f xvidcore-${XVID_VERSION}.tar.gz && \
cd xvidcore/build/generic && \
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" && \
make && \
make install && \
rm -rf ${DIR}
### fdk-aac https://github.com/mstorsjo/fdk-aac
RUN \
DIR=/tmp/fdk-aac && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
autoreconf -fiv && \
./configure --prefix="${PREFIX}" --enable-shared --datadir="${DIR}" && \
make && \
make install && \
rm -rf ${DIR}
## openjpeg https://github.com/uclouvain/openjpeg
RUN \
DIR=/tmp/openjpeg && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://github.com/uclouvain/openjpeg/archive/v${OPENJPEG_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
cmake -DBUILD_THIRDPARTY:BOOL=ON -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
make && \
make install && \
rm -rf ${DIR}
## freetype https://www.freetype.org/
RUN \
DIR=/tmp/freetype && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://download.savannah.gnu.org/releases/freetype/freetype-${FREETYPE_VERSION}.tar.gz && \
echo ${FREETYPE_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f freetype-${FREETYPE_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make && \
make install && \
rm -rf ${DIR}
## libvidstab https://github.com/georgmartius/vid.stab
RUN \
DIR=/tmp/vid.stab && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/georgmartius/vid.stab/archive/v${LIBVIDSTAB_VERSION}.tar.gz && \
echo ${LIBVIDSTAB_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f v${LIBVIDSTAB_VERSION}.tar.gz && \
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
make && \
make install && \
rm -rf ${DIR}
## fribidi https://www.fribidi.org/
RUN \
DIR=/tmp/fribidi && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz && \
sed -i 's/^SUBDIRS =.*/SUBDIRS=gen.tab charset lib bin/' Makefile.am && \
./bootstrap --no-config --auto && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make -j1 && \
make install && \
rm -rf ${DIR}
## kvazaar https://github.com/ultravideo/kvazaar
RUN \
DIR=/tmp/kvazaar && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz && \
./autogen.sh && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/aom && \
git clone --branch ${AOM_VERSION} --depth 1 https://aomedia.googlesource.com/aom ${DIR} ; \
cd ${DIR} ; \
rm -rf CMakeCache.txt CMakeFiles ; \
mkdir -p ./aom_build ; \
cd ./aom_build ; \
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=1 ..; \
make ; \
make install ; \
rm -rf ${DIR}

## libxcb (and supporting libraries) for screen capture https://xcb.freedesktop.org/
RUN \
DIR=/tmp/xorg-macros && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive//individual/util/util-macros-${XORG_MACROS_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f util-macros-${XORG_MACROS_VERSION}.tar.gz && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/xproto && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive/individual/proto/xproto-${XPROTO_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f xproto-${XPROTO_VERSION}.tar.gz && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libXau && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive/individual/lib/libXau-${XAU_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libXau-${XAU_VERSION}.tar.gz && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libpthread-stubs && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libxcb-proto && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
./configure --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libxcb && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/libxcb-${LIBXCB_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libxcb-${LIBXCB_VERSION}.tar.gz && \
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make && \
make install && \
rm -rf ${DIR}

## libzmq https://github.com/zeromq/libzmq/
RUN \
DIR=/tmp/libzmq && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
echo ${LIBZMQ_SHA256SUM} | sha256sum --check && \
tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz && \
./autogen.sh && \
./configure --prefix="${PREFIX}" && \
make && \
make check && \
make install && \
rm -rf ${DIR}

## libsrt https://github.com/Haivision/srt
RUN \
DIR=/tmp/srt && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/Haivision/srt/archive/v${LIBSRT_VERSION}.tar.gz && \
tar -xz --strip-components=1 -f v${LIBSRT_VERSION}.tar.gz && \
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
make && \
make install && \
rm -rf ${DIR}

## libpng
RUN \
DIR=/tmp/png && \
mkdir -p ${DIR} && \
cd ${DIR} && \
git clone https://git.code.sf.net/p/libpng/code ${DIR} -b v${LIBPNG_VERSION} --depth 1 && \
./autogen.sh && \
./configure --prefix="${PREFIX}" && \
make check && \
make install && \
rm -rf ${DIR}

## libaribb24
RUN \
DIR=/tmp/b24 && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/nkoriyama/aribb24/archive/v${LIBARIBB24_VERSION}.tar.gz && \
echo ${LIBARIBB24_SHA256SUM} | sha256sum --check && \
tar -xz --strip-components=1 -f v${LIBARIBB24_VERSION}.tar.gz && \
autoreconf -fiv && \
./configure CFLAGS="-I${PREFIX}/include -fPIC" --prefix="${PREFIX}" && \
make && \
make install && \
rm -rf ${DIR}

## ffmpeg https://ffmpeg.org/
RUN \
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
curl -sLO https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2 && \
tar -jx --strip-components=1 -f ffmpeg-${FFMPEG_VERSION}.tar.bz2

RUN \
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
./configure \
--disable-debug \
--disable-doc \
--disable-ffplay \
--enable-shared \
--enable-avresample \
--enable-libopencore-amrnb \
--enable-libopencore-amrwb \
--enable-gpl \
--enable-libfreetype \
--enable-libvidstab \
--enable-libmp3lame \
--enable-libopus \
--enable-libtheora \
--enable-libvorbis \
--enable-libvpx \
--enable-libwebp \
--enable-libxcb \
--enable-libx265 \
--enable-libxvid \
--enable-libx264 \
--enable-nonfree \
--enable-openssl \
--enable-libfdk_aac \
--enable-postproc \
--enable-small \
--enable-version3 \
--enable-libzmq \
--extra-libs=-ldl \
--prefix="${PREFIX}" \
--enable-libopenjpeg \
--enable-libkvazaar \
--enable-libaom \
--extra-libs=-lpthread \
--enable-libsrt \
--enable-libaribb24 \
--enable-nvenc \
--enable-cuda \
--enable-cuvid \
--enable-libnpp \
--extra-cflags="-I${PREFIX}/include -I${PREFIX}/include/ffnvcodec -I/usr/local/cuda/include/" \
--extra-ldflags="-L${PREFIX}/lib -L/usr/local/cuda/lib64 -L/usr/local/cuda/lib32/" && \
make && \
make install && \
make tools/zmqsend && cp tools/zmqsend ${PREFIX}/bin/ && \
make distclean && \
hash -r && \
cd tools && \
make qt-faststart && cp qt-faststart ${PREFIX}/bin/

## cleanup
RUN \
LD_LIBRARY_PATH="${PREFIX}/lib:${PREFIX}/lib64:${LD_LIBRARY_PATH}" ldd ${PREFIX}/bin/ffmpeg | grep opt/ffmpeg | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
for lib in /usr/local/lib/*.so.*; do ln -s "${lib##*/}" "${lib%%.so.*}".so; done && \
cp ${PREFIX}/bin/* /usr/local/bin/ && \
cp -r ${PREFIX}/share/* /usr/local/share/ && \
LD_LIBRARY_PATH=/usr/local/lib ffmpeg -buildconf && \
cp -r ${PREFIX}/include/libav* ${PREFIX}/include/libpostproc ${PREFIX}/include/libsw* /usr/local/include && \
mkdir -p /usr/local/lib/pkgconfig && \
for pc in ${PREFIX}/lib/pkgconfig/libav*.pc ${PREFIX}/lib/pkgconfig/libpostproc.pc ${PREFIX}/lib/pkgconfig/libsw*.pc; do \
sed "s:${PREFIX}:/usr/local:g; s:/lib64:/lib:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
done

FROM runtime-base AS release

ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64

CMD ["--help"]
ENTRYPOINT ["ffmpeg"]

# copy only needed files, without copying nvidia dev files
COPY --from=build /usr/local/bin /usr/local/bin/
COPY --from=build /usr/local/share /usr/local/share/
COPY --from=build /usr/local/lib /usr/local/lib/
COPY --from=build /usr/local/include /usr/local/include/

# Let's make sure the app built correctly
# Convenient to verify on https://hub.docker.com/r/jrottenberg/ffmpeg/builds/ console output
docker/Dockerfile.ffmpeg.armv7 (new file, 490 lines)
@@ -0,0 +1,490 @@
# inspired by:
# https://github.com/collelog/ffmpeg/blob/master/4.3.1-alpine-rpi4-arm64v8.Dockerfile
# https://github.com/mmastrac/ffmpeg-omx-rpi-docker/blob/master/Dockerfile
# https://github.com/jrottenberg/ffmpeg/pull/158/files
# https://github.com/jrottenberg/ffmpeg/pull/239
FROM ubuntu:20.04 AS base

WORKDIR /tmp/workdir

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get -yqq update && \
apt-get install -yq --no-install-recommends ca-certificates expat libgomp1 && \
apt-get autoremove -y && \
apt-get clean -y

FROM base as build

ENV FFMPEG_VERSION=4.3.1 \
AOM_VERSION=v1.0.0 \
FDKAAC_VERSION=0.1.5 \
FREETYPE_VERSION=2.5.5 \
FRIBIDI_VERSION=0.19.7 \
KVAZAAR_VERSION=1.2.0 \
LAME_VERSION=3.100 \
LIBPTHREAD_STUBS_VERSION=0.4 \
LIBVIDSTAB_VERSION=1.1.0 \
LIBXCB_VERSION=1.13.1 \
XCBPROTO_VERSION=1.13 \
OGG_VERSION=1.3.2 \
OPENCOREAMR_VERSION=0.1.5 \
OPUS_VERSION=1.2 \
OPENJPEG_VERSION=2.1.2 \
THEORA_VERSION=1.1.1 \
VORBIS_VERSION=1.3.5 \
VPX_VERSION=1.8.0 \
WEBP_VERSION=1.0.2 \
X264_VERSION=20170226-2245-stable \
X265_VERSION=3.1.1 \
XAU_VERSION=1.0.9 \
XORG_MACROS_VERSION=1.19.2 \
XPROTO_VERSION=7.0.31 \
XVID_VERSION=1.3.4 \
LIBZMQ_VERSION=4.3.3 \
SRC=/usr/local

ARG FREETYPE_SHA256SUM="5d03dd76c2171a7601e9ce10551d52d4471cf92cd205948e60289251daddffa8 freetype-2.5.5.tar.gz"
ARG FRIBIDI_SHA256SUM="3fc96fa9473bd31dcb5500bdf1aa78b337ba13eb8c301e7c28923fea982453a8 0.19.7.tar.gz"
ARG LIBVIDSTAB_SHA256SUM="14d2a053e56edad4f397be0cb3ef8eb1ec3150404ce99a426c4eb641861dc0bb v1.1.0.tar.gz"
ARG OGG_SHA256SUM="e19ee34711d7af328cb26287f4137e70630e7261b17cbe3cd41011d73a654692 libogg-1.3.2.tar.gz"
ARG OPUS_SHA256SUM="77db45a87b51578fbc49555ef1b10926179861d854eb2613207dc79d9ec0a9a9 opus-1.2.tar.gz"
ARG THEORA_SHA256SUM="40952956c47811928d1e7922cda3bc1f427eb75680c3c37249c91e949054916b libtheora-1.1.1.tar.gz"
ARG VORBIS_SHA256SUM="6efbcecdd3e5dfbf090341b485da9d176eb250d893e3eb378c428a2db38301ce libvorbis-1.3.5.tar.gz"
ARG XVID_SHA256SUM="4e9fd62728885855bc5007fe1be58df42e5e274497591fec37249e1052ae316f xvidcore-1.3.4.tar.gz"

ARG LD_LIBRARY_PATH=/opt/ffmpeg/lib
ARG MAKEFLAGS="-j2"
ARG PKG_CONFIG_PATH="/opt/ffmpeg/share/pkgconfig:/opt/ffmpeg/lib/pkgconfig:/opt/ffmpeg/lib64/pkgconfig:/opt/vc/lib/pkgconfig"
ARG PREFIX=/opt/ffmpeg
ARG LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64:/usr/lib64:/usr/lib:/lib64:/lib:/opt/vc/lib"

RUN buildDeps="autoconf \
automake \
cmake \
curl \
bzip2 \
libexpat1-dev \
g++ \
gcc \
git \
gperf \
libtool \
make \
nasm \
perl \
pkg-config \
python \
sudo \
libssl-dev \
yasm \
linux-headers-raspi2 \
libomxil-bellagio-dev \
libx265-dev \
libaom-dev \
zlib1g-dev" && \
apt-get -yqq update && \
apt-get install -yq --no-install-recommends ${buildDeps}
## opencore-amr https://sourceforge.net/projects/opencore-amr/
RUN \
DIR=/tmp/opencore-amr && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://versaweb.dl.sourceforge.net/project/opencore-amr/opencore-amr/opencore-amr-${OPENCOREAMR_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
## x264 http://www.videolan.org/developers/x264.html
RUN \
DIR=/tmp/x264 && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://download.videolan.org/pub/videolan/x264/snapshots/x264-snapshot-${X264_VERSION}.tar.bz2 | \
tar -jx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared --enable-pic --disable-cli && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
# ### x265 http://x265.org/
# RUN \
# DIR=/tmp/x265 && \
# mkdir -p ${DIR} && \
# cd ${DIR} && \
# curl -sL https://download.videolan.org/pub/videolan/x265/x265_${X265_VERSION}.tar.gz | \
# tar -zx && \
# cd x265_${X265_VERSION}/build/linux && \
# sed -i "/-DEXTRA_LIB/ s/$/ -DCMAKE_INSTALL_PREFIX=\${PREFIX}/" multilib.sh && \
# sed -i "/^cmake/ s/$/ -DENABLE_CLI=OFF/" multilib.sh && \
# # export CXXFLAGS="${CXXFLAGS} -fPIC" && \
# ./multilib.sh && \
# make -C 8bit install && \
# rm -rf ${DIR}
### libogg https://www.xiph.org/ogg/
RUN \
DIR=/tmp/ogg && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/ogg/libogg-${OGG_VERSION}.tar.gz && \
echo ${OGG_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libogg-${OGG_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libopus https://www.opus-codec.org/
RUN \
DIR=/tmp/opus && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
echo ${OPUS_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz && \
autoreconf -fiv && \
./configure --prefix="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libvorbis https://xiph.org/vorbis/
RUN \
DIR=/tmp/vorbis && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/vorbis/libvorbis-${VORBIS_VERSION}.tar.gz && \
echo ${VORBIS_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libvorbis-${VORBIS_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libtheora http://www.theora.org/
RUN \
DIR=/tmp/theora && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xiph.org/releases/theora/libtheora-${THEORA_VERSION}.tar.gz && \
echo ${THEORA_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f libtheora-${THEORA_VERSION}.tar.gz && \
curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess && \
curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub && \
./configure --prefix="${PREFIX}" --with-ogg="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libvpx https://www.webmproject.org/code/
RUN \
DIR=/tmp/vpx && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://codeload.github.com/webmproject/libvpx/tar.gz/v${VPX_VERSION} | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-vp8 --enable-vp9 --enable-vp9-highbitdepth --enable-pic --enable-shared \
--disable-debug --disable-examples --disable-docs --disable-install-bins && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libwebp https://developers.google.com/speed/webp/
RUN \
DIR=/tmp/vebp && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-${WEBP_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### libmp3lame http://lame.sourceforge.net/
RUN \
DIR=/tmp/lame && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://versaweb.dl.sourceforge.net/project/lame/lame/$(echo ${LAME_VERSION} | sed -e 's/[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)/\1.\2/')/lame-${LAME_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" --enable-shared --enable-nasm --disable-frontend && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### xvid https://www.xvid.com/
RUN \
DIR=/tmp/xvid && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO http://downloads.xvid.org/downloads/xvidcore-${XVID_VERSION}.tar.gz && \
echo ${XVID_SHA256SUM} | sha256sum --check && \
tar -zx -f xvidcore-${XVID_VERSION}.tar.gz && \
cd xvidcore/build/generic && \
./configure --prefix="${PREFIX}" --bindir="${PREFIX}/bin" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
### fdk-aac https://github.com/mstorsjo/fdk-aac
RUN \
DIR=/tmp/fdk-aac && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
autoreconf -fiv && \
./configure --prefix="${PREFIX}" --enable-shared --datadir="${DIR}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
## openjpeg https://github.com/uclouvain/openjpeg
RUN \
DIR=/tmp/openjpeg && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sL https://github.com/uclouvain/openjpeg/archive/v${OPENJPEG_VERSION}.tar.gz | \
tar -zx --strip-components=1 && \
export CFLAGS="${CFLAGS} -DPNG_ARM_NEON_OPT=0" && \
cmake -DBUILD_THIRDPARTY:BOOL=ON -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
## freetype https://www.freetype.org/
RUN \
DIR=/tmp/freetype && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://download.savannah.gnu.org/releases/freetype/freetype-${FREETYPE_VERSION}.tar.gz && \
echo ${FREETYPE_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f freetype-${FREETYPE_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
## libvidstab https://github.com/georgmartius/vid.stab
RUN \
DIR=/tmp/vid.stab && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/georgmartius/vid.stab/archive/v${LIBVIDSTAB_VERSION}.tar.gz && \
echo ${LIBVIDSTAB_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f v${LIBVIDSTAB_VERSION}.tar.gz && \
cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" . && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}
## fribidi https://www.fribidi.org/
RUN \
DIR=/tmp/fribidi && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz && \
sed -i 's/^SUBDIRS =.*/SUBDIRS=gen.tab charset lib bin/' Makefile.am && \
./bootstrap --no-config --auto && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make -j1 && \
make -j $(nproc) install && \
rm -rf ${DIR}

## kvazaar https://github.com/ultravideo/kvazaar
RUN \
DIR=/tmp/kvazaar && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz && \
./autogen.sh && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

# RUN \
# DIR=/tmp/aom && \
# git clone --branch ${AOM_VERSION} --depth 1 https://aomedia.googlesource.com/aom ${DIR} ; \
# cd ${DIR} ; \
# rm -rf CMakeCache.txt CMakeFiles ; \
# mkdir -p ./aom_build ; \
# cd ./aom_build ; \
# cmake -DCMAKE_INSTALL_PREFIX="${PREFIX}" -DBUILD_SHARED_LIBS=1 ..; \
# make ; \
# make install ; \
# rm -rf ${DIR}

## libxcb (and supporting libraries) for screen capture https://xcb.freedesktop.org/
RUN \
DIR=/tmp/xorg-macros && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive//individual/util/util-macros-${XORG_MACROS_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f util-macros-${XORG_MACROS_VERSION}.tar.gz && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/xproto && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive/individual/proto/xproto-${XPROTO_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f xproto-${XPROTO_VERSION}.tar.gz && \
curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess && \
curl -sL 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libXau && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://www.x.org/archive/individual/lib/libXau-${XAU_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libXau-${XAU_VERSION}.tar.gz && \
./configure --srcdir=${DIR} --prefix="${PREFIX}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libpthread-stubs && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libpthread-stubs-${LIBPTHREAD_STUBS_VERSION}.tar.gz && \
./configure --prefix="${PREFIX}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libxcb-proto && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f xcb-proto-${XCBPROTO_VERSION}.tar.gz && \
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
./configure --prefix="${PREFIX}" && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

RUN \
DIR=/tmp/libxcb && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://xcb.freedesktop.org/dist/libxcb-${LIBXCB_VERSION}.tar.gz && \
tar -zx --strip-components=1 -f libxcb-${LIBXCB_VERSION}.tar.gz && \
ACLOCAL_PATH="${PREFIX}/share/aclocal" ./autogen.sh && \
./configure --prefix="${PREFIX}" --disable-static --enable-shared && \
make -j $(nproc) && \
make -j $(nproc) install && \
rm -rf ${DIR}

## libzmq https://github.com/zeromq/libzmq/
RUN \
DIR=/tmp/libzmq && \
mkdir -p ${DIR} && \
cd ${DIR} && \
curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz && \
./autogen.sh && \
./configure --prefix="${PREFIX}" && \
make -j $(nproc) && \
# make check && \
make -j $(nproc) install && \
rm -rf ${DIR}

## userland https://github.com/raspberrypi/userland
RUN \
DIR=/tmp/userland && \
mkdir -p ${DIR} && \
cd ${DIR} && \
git clone --depth 1 https://github.com/raspberrypi/userland.git . && \
./buildme && \
rm -rf ${DIR}

## ffmpeg https://ffmpeg.org/
RUN \
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
curl -sLO https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2 && \
tar -jx --strip-components=1 -f ffmpeg-${FFMPEG_VERSION}.tar.bz2

RUN \
DIR=/tmp/ffmpeg && mkdir -p ${DIR} && cd ${DIR} && \
./configure \
--disable-debug \
--disable-doc \
--disable-ffplay \
--enable-shared \
--enable-avresample \
--enable-libopencore-amrnb \
--enable-libopencore-amrwb \
--enable-gpl \
--enable-libfreetype \
--enable-libvidstab \
--enable-libmp3lame \
--enable-libopus \
--enable-libtheora \
--enable-libvorbis \
--enable-libvpx \
--enable-libwebp \
--enable-libxcb \
--enable-libx265 \
--enable-libxvid \
--enable-libx264 \
--enable-nonfree \
--enable-openssl \
--enable-libfdk_aac \
--enable-postproc \
--enable-small \
--enable-version3 \
--enable-libzmq \
--extra-libs=-ldl \
--prefix="${PREFIX}" \
--enable-libopenjpeg \
--enable-libkvazaar \
--enable-libaom \
--extra-libs=-lpthread \
--enable-omx \
--enable-omx-rpi \
--enable-mmal \
--enable-v4l2_m2m \
--enable-neon \
--extra-cflags="-I${PREFIX}/include" \
--extra-ldflags="-L${PREFIX}/lib" && \
make -j $(nproc) && \
make -j $(nproc) install && \
make tools/zmqsend && cp tools/zmqsend ${PREFIX}/bin/ && \
make distclean && \
hash -r && \
cd tools && \
make qt-faststart && cp qt-faststart ${PREFIX}/bin/

## cleanup
RUN \
ldd ${PREFIX}/bin/ffmpeg | grep opt/ffmpeg | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
# copy userland lib too
ldd ${PREFIX}/bin/ffmpeg | grep opt/vc | cut -d ' ' -f 3 | xargs -i cp {} /usr/local/lib/ && \
for lib in /usr/local/lib/*.so.*; do ln -s "${lib##*/}" "${lib%%.so.*}".so; done && \
cp ${PREFIX}/bin/* /usr/local/bin/ && \
cp -r ${PREFIX}/share/ffmpeg /usr/local/share/ && \
LD_LIBRARY_PATH=/usr/local/lib ffmpeg -buildconf && \
cp -r ${PREFIX}/include/libav* ${PREFIX}/include/libpostproc ${PREFIX}/include/libsw* /usr/local/include && \
mkdir -p /usr/local/lib/pkgconfig && \
for pc in ${PREFIX}/lib/pkgconfig/libav*.pc ${PREFIX}/lib/pkgconfig/libpostproc.pc ${PREFIX}/lib/pkgconfig/libsw*.pc; do \
sed "s:${PREFIX}:/usr/local:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
done

FROM base AS release

ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:/usr/lib:/usr/lib64:/lib:/lib64

RUN \
apt-get -yqq update && \
apt-get install -yq --no-install-recommends libx265-dev libaom-dev && \
apt-get autoremove -y && \
apt-get clean -y

CMD ["--help"]
ENTRYPOINT ["ffmpeg"]

COPY --from=build /usr/local /usr/local/
docker/Dockerfile.web (new file, 9 lines)
@@ -0,0 +1,9 @@
ARG NODE_VERSION=14.0

FROM node:${NODE_VERSION}

WORKDIR /opt/frigate

COPY . .

RUN npm install && npm run build
docker/Dockerfile.wheels (new file, 42 lines)
@@ -0,0 +1,42 @@
FROM ubuntu:20.04 as build

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get -qq update \
&& apt-get -qq install -y \
python3 \
python3-dev \
wget \
# opencv dependencies
build-essential cmake git pkg-config libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libssl-dev \
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# scipy dependencies
gcc gfortran libopenblas-dev liblapack-dev cython

RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip==20.2.4"

RUN pip3 install scikit-build

RUN pip3 wheel --wheel-dir=/wheels \
opencv-python-headless \
# pinning due to issue in 1.19.5 https://github.com/numpy/numpy/issues/18131
numpy==1.19.4 \
imutils \
scipy \
psutil \
Flask \
paho-mqtt \
PyYAML \
matplotlib \
click \
setproctitle \
peewee

FROM scratch

COPY --from=build /wheels /wheels
docs/cameras.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# Camera Specific Configuration

Frigate should work with most RTSP cameras and h264 feeds such as Dahua.

## RTMP Cameras

The input parameters need to be adjusted for RTMP cameras:

```yaml
ffmpeg:
  input_args:
    - -avoid_negative_ts
    - make_zero
    - -fflags
    - nobuffer
    - -flags
    - low_delay
    - -strict
    - experimental
    - -fflags
    - +genpts+discardcorrupt
    - -use_wallclock_as_timestamps
    - '1'
```

## Blue Iris RTSP Cameras

You will need to remove the `nobuffer` flag for Blue Iris RTSP cameras:

```yaml
ffmpeg:
  input_args:
    - -avoid_negative_ts
    - make_zero
    - -flags
    - low_delay
    - -strict
    - experimental
    - -fflags
    - +genpts+discardcorrupt
    - -rtsp_transport
    - tcp
    - -stimeout
    - "5000000"
    - -use_wallclock_as_timestamps
    - "1"
```
BIN docs/diagram.png (new file, 132 KiB)
BIN docs/example-mask-check-point.png (new file, 2.2 MiB)
BIN docs/example-mask-overlay.png (new file, 2.1 MiB)
BIN docs/example-mask-poly.png (new file, 2.1 MiB)
BIN docs/example-mask.bmp (new file, 6.0 MiB)
BIN docs/frigate.png (new file, 12 KiB)
10
docs/how-frigate-works.md
Normal file
@@ -0,0 +1,10 @@
# How Frigate Works
Frigate is designed to minimize resource usage and maximize performance by only looking for objects when and where it is necessary.



## 1. Look for Motion

## 2. Calculate Detection Regions

## 3. Run Object Detection
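Taken together, the three steps above boil down to a single per-frame loop. The following is a simplified, illustrative sketch of that loop; `expand_to_region` and `crop` are placeholder helpers rather than Frigate's real functions, and the real pipeline also folds currently tracked objects into step 2.

```python
# Simplified, illustrative sketch of the loop described above.
# The helper names are placeholders, not Frigate's actual API.
def process_frame(frame, motion_detector, object_detector):
    # 1. Look for motion: cheap frame differencing finds the areas that changed
    motion_boxes = motion_detector.detect(frame)

    # 2. Calculate detection regions: expand the motion areas into
    #    square regions sized to what the detection model expects
    regions = [expand_to_region(frame, box) for box in motion_boxes]

    # 3. Run object detection only on those regions, never on the full frame
    detections = []
    for region in regions:
        detections.extend(object_detector.detect(crop(frame, region)))
    return detections
```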
BIN  docs/media_browser.png  Normal file  (Size: 781 KiB)
71
docs/notification-examples.md
Normal file
@@ -0,0 +1,71 @@
# Notification examples

Here are some examples of notifications for the Home Assistant Android companion app:
```yaml
automation:

  - alias: When a person enters a zone named yard
    trigger:
      platform: mqtt
      topic: frigate/events
    conditions:
      - "{{ trigger.payload_json['after']['label'] == 'person' }}"
      - "{{ 'yard' in trigger.payload_json['after']['entered_zones'] }}"
    action:
      - service: notify.mobile_app_pixel_3
        data_template:
          message: "A {{trigger.payload_json['after']['label']}} has entered the yard."
          data:
            image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
            tag: "{{trigger.payload_json['after']['id']}}"

  - alias: When a person leaves a zone named yard
    trigger:
      platform: mqtt
      topic: frigate/events
    conditions:
      - "{{ trigger.payload_json['after']['label'] == 'person' }}"
      - "{{ 'yard' in trigger.payload_json['before']['current_zones'] }}"
      - "{{ not 'yard' in trigger.payload_json['after']['current_zones'] }}"
    action:
      - service: notify.mobile_app_pixel_3
        data_template:
          message: "A {{trigger.payload_json['after']['label']}} has left the yard."
          data:
            image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
            tag: "{{trigger.payload_json['after']['id']}}"

  - alias: Notify for dogs in the front with a high top score
    trigger:
      platform: mqtt
      topic: frigate/events
    conditions:
      - "{{ trigger.payload_json['after']['label'] == 'dog' }}"
      - "{{ trigger.payload_json['after']['camera'] == 'front' }}"
      - "{{ trigger.payload_json['after']['top_score'] > 0.98 }}"
    action:
      - service: notify.mobile_app_pixel_3
        data_template:
          message: 'High confidence dog detection.'
          data:
            image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
            tag: "{{trigger.payload_json['after']['id']}}"
```

If you are using Telegram, you can fetch the image directly from Frigate:
```yaml
automation:
  - alias: Notify of events
    trigger:
      platform: mqtt
      topic: frigate/events
    action:
      - service: notify.telegram_full
        data_template:
          message: 'A {{trigger.payload_json["after"]["label"]}} was detected.'
          data:
            photo:
              # this url should work for addon users
              - url: 'http://ccab4aaf-frigate:5000/api/events/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg'
                caption: 'A {{trigger.payload_json["after"]["label"]}} was detected on {{ trigger.payload_json["after"]["camera"] }} camera'
```
BIN  docs/notification.png  Normal file  (Size: 1.5 MiB)
105
docs/nvdec.md
Normal file
@@ -0,0 +1,105 @@
# NVIDIA hardware decoder (NVDEC)

Certain NVIDIA cards include a hardware decoder, which can greatly improve the
performance of video decoding. In order to use NVDEC, a special build of
ffmpeg with NVDEC support is required. The special docker architecture 'amd64nvidia'
includes this support for amd64 platforms. An aarch64 build for the Jetson, which
also includes NVDEC, may be added in the future.

## Docker setup

### Requirements
The [NVIDIA closed source driver](https://www.nvidia.com/en-us/drivers/unix/) is required to access NVDEC.
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker) is required to pass NVDEC through to docker.

### Setting up docker-compose

In order to pass NVDEC through, the docker runtime must be set to `nvidia` and the environment variables
`NVIDIA_VISIBLE_DEVICES=all` and `NVIDIA_DRIVER_CAPABILITIES=compute,utility,video` must be set.

In a docker compose file, these lines need to be set:
```yaml
services:
  frigate:
    ...
    image: blakeblackshear/frigate:stable-amd64nvidia
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
```

### Setting up the configuration file

In your frigate config.yml, you'll need to set ffmpeg to use the hardware decoder.
The decoder you choose will depend on the input video.

A list of supported codecs (you can run `ffmpeg -decoders | grep cuvid` in the container to get the full list):
```
 V..... h263_cuvid Nvidia CUVID H263 decoder (codec h263)
 V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264)
 V..... hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc)
 V..... mjpeg_cuvid Nvidia CUVID MJPEG decoder (codec mjpeg)
 V..... mpeg1_cuvid Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)
 V..... mpeg2_cuvid Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)
 V..... mpeg4_cuvid Nvidia CUVID MPEG4 decoder (codec mpeg4)
 V..... vc1_cuvid Nvidia CUVID VC1 decoder (codec vc1)
 V..... vp8_cuvid Nvidia CUVID VP8 decoder (codec vp8)
 V..... vp9_cuvid Nvidia CUVID VP9 decoder (codec vp9)
```

For example, for H.265 video (hevc), you'll select `hevc_cuvid`. Add
`-c:v hevc_cuvid` to your ffmpeg input arguments:

```yaml
ffmpeg:
  input_args:
    ...
    - -c:v
    - hevc_cuvid
```

If everything is working correctly, you should see a significant improvement in performance.
Verify that hardware decoding is working by running `nvidia-smi`, which should show the ffmpeg
processes:

```
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 455.38       Driver Version: 455.38       CUDA Version: 11.1     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  GeForce GTX 166...  Off  | 00000000:03:00.0 Off |                  N/A |
| 38%   41C    P2    36W / 125W |   2082MiB /  5942MiB |      5%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A     12737      C   ffmpeg                            249MiB |
|    0   N/A  N/A     12751      C   ffmpeg                            249MiB |
|    0   N/A  N/A     12772      C   ffmpeg                            249MiB |
|    0   N/A  N/A     12775      C   ffmpeg                            249MiB |
|    0   N/A  N/A     12800      C   ffmpeg                            249MiB |
|    0   N/A  N/A     12811      C   ffmpeg                            417MiB |
|    0   N/A  N/A     12827      C   ffmpeg                            417MiB |
+-----------------------------------------------------------------------------+
```

To further improve performance, you can set ffmpeg to skip frames in the output,
using the fps filter:

```yaml
output_args:
  - -filter:v
  - fps=fps=5
```

For example, this setting allows Frigate to consume my 10-15 fps camera streams on
a relatively low-powered Haswell machine with low CPU usage.
BIN  docs/zone_example.jpg  Normal file  (Size: 73 KiB)
0
frigate/__init__.py
Normal file
15
frigate/__main__.py
Normal file
@@ -0,0 +1,15 @@
import faulthandler; faulthandler.enable()
import sys
import threading

threading.current_thread().name = "frigate"

from frigate.app import FrigateApp

cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None

if __name__ == '__main__':
    frigate_app = FrigateApp()

    frigate_app.start()
258
frigate/app.py
Normal file
@@ -0,0 +1,258 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import multiprocessing as mp
|
||||||
|
import os
|
||||||
|
from logging.handlers import QueueHandler
|
||||||
|
from typing import Dict, List
|
||||||
|
import sys
|
||||||
|
import signal
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from peewee_migrate import Router
|
||||||
|
from playhouse.sqlite_ext import SqliteExtDatabase
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
|
||||||
|
from frigate.edgetpu import EdgeTPUProcess
|
||||||
|
from frigate.events import EventProcessor, EventCleanup
|
||||||
|
from frigate.http import create_app
|
||||||
|
from frigate.log import log_process, root_configurer
|
||||||
|
from frigate.models import Event
|
||||||
|
from frigate.mqtt import create_mqtt_client
|
||||||
|
from frigate.object_processing import TrackedObjectProcessor
|
||||||
|
from frigate.record import RecordingMaintainer
|
||||||
|
from frigate.stats import StatsEmitter, stats_init
|
||||||
|
from frigate.video import capture_camera, track_camera
|
||||||
|
from frigate.watchdog import FrigateWatchdog
|
||||||
|
from frigate.zeroconf import broadcast_zeroconf
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class FrigateApp():
|
||||||
|
def __init__(self):
|
||||||
|
self.stop_event = mp.Event()
|
||||||
|
self.config: FrigateConfig = None
|
||||||
|
self.detection_queue = mp.Queue()
|
||||||
|
self.detectors: Dict[str, EdgeTPUProcess] = {}
|
||||||
|
self.detection_out_events: Dict[str, mp.Event] = {}
|
||||||
|
self.detection_shms: List[mp.shared_memory.SharedMemory] = []
|
||||||
|
self.log_queue = mp.Queue()
|
||||||
|
self.camera_metrics = {}
|
||||||
|
|
||||||
|
def set_environment_vars(self):
|
||||||
|
for key, value in self.config.environment_vars.items():
|
||||||
|
os.environ[key] = value
|
||||||
|
|
||||||
|
def ensure_dirs(self):
|
||||||
|
for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
|
||||||
|
if not os.path.exists(d) and not os.path.islink(d):
|
||||||
|
logger.info(f"Creating directory: {d}")
|
||||||
|
os.makedirs(d)
|
||||||
|
else:
|
||||||
|
logger.debug(f"Skipping directory: {d}")
|
||||||
|
|
||||||
|
tmpfs_size = self.config.clips.tmpfs_cache_size
|
||||||
|
if tmpfs_size:
|
||||||
|
logger.info(f"Creating tmpfs of size {tmpfs_size}")
|
||||||
|
rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
|
||||||
|
if rc != 0:
|
||||||
|
logger.error(f"Failed to create tmpfs, error code: {rc}")
|
||||||
|
|
||||||
|
def init_logger(self):
|
||||||
|
self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
|
||||||
|
self.log_process.daemon = True
|
||||||
|
self.log_process.start()
|
||||||
|
root_configurer(self.log_queue)
|
||||||
|
|
||||||
|
def init_config(self):
|
||||||
|
config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
|
||||||
|
self.config = FrigateConfig(config_file=config_file)
|
||||||
|
|
||||||
|
for camera_name in self.config.cameras.keys():
|
||||||
|
# create camera_metrics
|
||||||
|
self.camera_metrics[camera_name] = {
|
||||||
|
'camera_fps': mp.Value('d', 0.0),
|
||||||
|
'skipped_fps': mp.Value('d', 0.0),
|
||||||
|
'process_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
|
||||||
|
'detection_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_frame': mp.Value('d', 0.0),
|
||||||
|
'read_start': mp.Value('d', 0.0),
|
||||||
|
'ffmpeg_pid': mp.Value('i', 0),
|
||||||
|
'frame_queue': mp.Queue(maxsize=2),
|
||||||
|
}
|
||||||
|
|
||||||
|
def check_config(self):
|
||||||
|
for name, camera in self.config.cameras.items():
|
||||||
|
assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
|
||||||
|
if not camera.clips.enabled and 'clips' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
|
||||||
|
elif camera.clips.enabled and not 'clips' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")
|
||||||
|
|
||||||
|
if not camera.record.enabled and 'record' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
|
||||||
|
elif camera.record.enabled and not 'record' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")
|
||||||
|
|
||||||
|
if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
|
||||||
|
elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
|
||||||
|
logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")
|
||||||
|
|
||||||
|
def set_log_levels(self):
|
||||||
|
logging.getLogger().setLevel(self.config.logger.default)
|
||||||
|
for log, level in self.config.logger.logs.items():
|
||||||
|
logging.getLogger(log).setLevel(level)
|
||||||
|
|
||||||
|
if not 'werkzeug' in self.config.logger.logs:
|
||||||
|
logging.getLogger('werkzeug').setLevel('ERROR')
|
||||||
|
|
||||||
|
def init_queues(self):
|
||||||
|
# Queues for clip processing
|
||||||
|
self.event_queue = mp.Queue()
|
||||||
|
self.event_processed_queue = mp.Queue()
|
||||||
|
|
||||||
|
# Queue for cameras to push tracked objects to
|
||||||
|
self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
|
||||||
|
|
||||||
|
def init_database(self):
|
||||||
|
self.db = SqliteExtDatabase(self.config.database.path)
|
||||||
|
|
||||||
|
# Run migrations
|
||||||
|
del(logging.getLogger('peewee_migrate').handlers[:])
|
||||||
|
router = Router(self.db)
|
||||||
|
router.run()
|
||||||
|
|
||||||
|
models = [Event]
|
||||||
|
self.db.bind(models)
|
||||||
|
|
||||||
|
def init_stats(self):
|
||||||
|
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
|
||||||
|
|
||||||
|
def init_web_server(self):
|
||||||
|
self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)
|
||||||
|
|
||||||
|
def init_mqtt(self):
|
||||||
|
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
|
||||||
|
|
||||||
|
def start_detectors(self):
|
||||||
|
model_shape = (self.config.model.height, self.config.model.width)
|
||||||
|
for name in self.config.cameras.keys():
|
||||||
|
self.detection_out_events[name] = mp.Event()
|
||||||
|
shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
|
||||||
|
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
|
||||||
|
self.detection_shms.append(shm_in)
|
||||||
|
self.detection_shms.append(shm_out)
|
||||||
|
|
||||||
|
for name, detector in self.config.detectors.items():
|
||||||
|
if detector.type == 'cpu':
|
||||||
|
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
|
||||||
|
if detector.type == 'edgetpu':
|
||||||
|
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
|
||||||
|
|
||||||
|
def start_detected_frames_processor(self):
|
||||||
|
self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
|
||||||
|
self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
|
||||||
|
self.detected_frames_processor.start()
|
||||||
|
|
||||||
|
def start_camera_processors(self):
|
||||||
|
model_shape = (self.config.model.height, self.config.model.width)
|
||||||
|
for name, config in self.config.cameras.items():
|
||||||
|
camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
|
||||||
|
self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
|
||||||
|
self.camera_metrics[name]))
|
||||||
|
camera_process.daemon = True
|
||||||
|
self.camera_metrics[name]['process'] = camera_process
|
||||||
|
camera_process.start()
|
||||||
|
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
|
||||||
|
|
||||||
|
def start_camera_capture_processes(self):
|
||||||
|
for name, config in self.config.cameras.items():
|
||||||
|
capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
|
||||||
|
self.camera_metrics[name]))
|
||||||
|
capture_process.daemon = True
|
||||||
|
self.camera_metrics[name]['capture_process'] = capture_process
|
||||||
|
capture_process.start()
|
||||||
|
logger.info(f"Capture process started for {name}: {capture_process.pid}")
|
||||||
|
|
||||||
|
def start_event_processor(self):
|
||||||
|
self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event)
|
||||||
|
self.event_processor.start()
|
||||||
|
|
||||||
|
def start_event_cleanup(self):
|
||||||
|
self.event_cleanup = EventCleanup(self.config, self.stop_event)
|
||||||
|
self.event_cleanup.start()
|
||||||
|
|
||||||
|
def start_recording_maintainer(self):
|
||||||
|
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
|
||||||
|
self.recording_maintainer.start()
|
||||||
|
|
||||||
|
def start_stats_emitter(self):
|
||||||
|
self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
|
||||||
|
self.stats_emitter.start()
|
||||||
|
|
||||||
|
def start_watchdog(self):
|
||||||
|
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
|
||||||
|
self.frigate_watchdog.start()
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
self.init_logger()
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
self.init_config()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error parsing config: {e}")
|
||||||
|
self.log_process.terminate()
|
||||||
|
sys.exit(1)
|
||||||
|
self.set_environment_vars()
|
||||||
|
self.ensure_dirs()
|
||||||
|
self.check_config()
|
||||||
|
self.set_log_levels()
|
||||||
|
self.init_queues()
|
||||||
|
self.init_database()
|
||||||
|
self.init_mqtt()
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
self.log_process.terminate()
|
||||||
|
sys.exit(1)
|
||||||
|
self.start_detectors()
|
||||||
|
self.start_detected_frames_processor()
|
||||||
|
self.start_camera_processors()
|
||||||
|
self.start_camera_capture_processes()
|
||||||
|
self.init_stats()
|
||||||
|
self.init_web_server()
|
||||||
|
self.start_event_processor()
|
||||||
|
self.start_event_cleanup()
|
||||||
|
self.start_recording_maintainer()
|
||||||
|
self.start_stats_emitter()
|
||||||
|
self.start_watchdog()
|
||||||
|
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
|
||||||
|
|
||||||
|
def receiveSignal(signalNumber, frame):
|
||||||
|
self.stop()
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
signal.signal(signal.SIGTERM, receiveSignal)
|
||||||
|
|
||||||
|
self.flask_app.run(host='127.0.0.1', port=5001, debug=False)
|
||||||
|
self.stop()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
logger.info(f"Stopping...")
|
||||||
|
self.stop_event.set()
|
||||||
|
|
||||||
|
self.detected_frames_processor.join()
|
||||||
|
self.event_processor.join()
|
||||||
|
self.event_cleanup.join()
|
||||||
|
self.recording_maintainer.join()
|
||||||
|
self.stats_emitter.join()
|
||||||
|
self.frigate_watchdog.join()
|
||||||
|
|
||||||
|
for detector in self.detectors.values():
|
||||||
|
detector.stop()
|
||||||
|
|
||||||
|
while len(self.detection_shms) > 0:
|
||||||
|
shm = self.detection_shms.pop()
|
||||||
|
shm.close()
|
||||||
|
shm.unlink()
|
||||||
1072
frigate/config.py
Normal file
3
frigate/const.py
Normal file
@@ -0,0 +1,3 @@
CLIPS_DIR = '/media/frigate/clips'
RECORD_DIR = '/media/frigate/recordings'
CACHE_DIR = '/tmp/cache'
226
frigate/edgetpu.py
Normal file
@@ -0,0 +1,226 @@
|
|||||||
|
import datetime
|
||||||
|
import hashlib
|
||||||
|
import logging
|
||||||
|
import multiprocessing as mp
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
import threading
|
||||||
|
import signal
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from multiprocessing.connection import Connection
|
||||||
|
from setproctitle import setproctitle
|
||||||
|
from typing import Dict
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import tflite_runtime.interpreter as tflite
|
||||||
|
from tflite_runtime.interpreter import load_delegate
|
||||||
|
|
||||||
|
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
def load_labels(path, encoding='utf-8'):
|
||||||
|
"""Loads labels from file (with or without index numbers).
|
||||||
|
Args:
|
||||||
|
path: path to label file.
|
||||||
|
encoding: label file encoding.
|
||||||
|
Returns:
|
||||||
|
Dictionary mapping indices to labels.
|
||||||
|
"""
|
||||||
|
with open(path, 'r', encoding=encoding) as f:
|
||||||
|
lines = f.readlines()
|
||||||
|
if not lines:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
if lines[0].split(' ', maxsplit=1)[0].isdigit():
|
||||||
|
pairs = [line.split(' ', maxsplit=1) for line in lines]
|
||||||
|
return {int(index): label.strip() for index, label in pairs}
|
||||||
|
else:
|
||||||
|
return {index: line.strip() for index, line in enumerate(lines)}
|
||||||
|
|
||||||
|
class ObjectDetector(ABC):
|
||||||
|
@abstractmethod
|
||||||
|
def detect(self, tensor_input, threshold = .4):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class LocalObjectDetector(ObjectDetector):
|
||||||
|
def __init__(self, tf_device=None, num_threads=3, labels=None):
|
||||||
|
self.fps = EventsPerSecond()
|
||||||
|
if labels is None:
|
||||||
|
self.labels = {}
|
||||||
|
else:
|
||||||
|
self.labels = load_labels(labels)
|
||||||
|
|
||||||
|
device_config = {"device": "usb"}
|
||||||
|
if not tf_device is None:
|
||||||
|
device_config = {"device": tf_device}
|
||||||
|
|
||||||
|
edge_tpu_delegate = None
|
||||||
|
|
||||||
|
if tf_device != 'cpu':
|
||||||
|
try:
|
||||||
|
logger.info(f"Attempting to load TPU as {device_config['device']}")
|
||||||
|
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
|
||||||
|
logger.info("TPU found")
|
||||||
|
self.interpreter = tflite.Interpreter(
|
||||||
|
model_path='/edgetpu_model.tflite',
|
||||||
|
experimental_delegates=[edge_tpu_delegate])
|
||||||
|
except ValueError:
|
||||||
|
logger.info("No EdgeTPU detected.")
|
||||||
|
raise
|
||||||
|
else:
|
||||||
|
self.interpreter = tflite.Interpreter(
|
||||||
|
model_path='/cpu_model.tflite', num_threads=num_threads)
|
||||||
|
|
||||||
|
self.interpreter.allocate_tensors()
|
||||||
|
|
||||||
|
self.tensor_input_details = self.interpreter.get_input_details()
|
||||||
|
self.tensor_output_details = self.interpreter.get_output_details()
|
||||||
|
|
||||||
|
def detect(self, tensor_input, threshold=.4):
|
||||||
|
detections = []
|
||||||
|
|
||||||
|
raw_detections = self.detect_raw(tensor_input)
|
||||||
|
|
||||||
|
for d in raw_detections:
|
||||||
|
if d[1] < threshold:
|
||||||
|
break
|
||||||
|
detections.append((
|
||||||
|
self.labels[int(d[0])],
|
||||||
|
float(d[1]),
|
||||||
|
(d[2], d[3], d[4], d[5])
|
||||||
|
))
|
||||||
|
self.fps.update()
|
||||||
|
return detections
|
||||||
|
|
||||||
|
def detect_raw(self, tensor_input):
|
||||||
|
self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
|
||||||
|
self.interpreter.invoke()
|
||||||
|
boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
|
||||||
|
label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
|
||||||
|
scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))
|
||||||
|
|
||||||
|
detections = np.zeros((20,6), np.float32)
|
||||||
|
for i, score in enumerate(scores):
|
||||||
|
detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
|
||||||
|
|
||||||
|
return detections
|
||||||
|
|
||||||
|
def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
|
||||||
|
threading.current_thread().name = f"detector:{name}"
|
||||||
|
logger = logging.getLogger(f"detector.{name}")
|
||||||
|
logger.info(f"Starting detection process: {os.getpid()}")
|
||||||
|
setproctitle(f"frigate.detector.{name}")
|
||||||
|
listen()
|
||||||
|
|
||||||
|
stop_event = mp.Event()
|
||||||
|
def receiveSignal(signalNumber, frame):
|
||||||
|
stop_event.set()
|
||||||
|
|
||||||
|
signal.signal(signal.SIGTERM, receiveSignal)
|
||||||
|
signal.signal(signal.SIGINT, receiveSignal)
|
||||||
|
|
||||||
|
frame_manager = SharedMemoryFrameManager()
|
||||||
|
object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads)
|
||||||
|
|
||||||
|
outputs = {}
|
||||||
|
for name in out_events.keys():
|
||||||
|
out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
|
||||||
|
out_np = np.ndarray((20,6), dtype=np.float32, buffer=out_shm.buf)
|
||||||
|
outputs[name] = {
|
||||||
|
'shm': out_shm,
|
||||||
|
'np': out_np
|
||||||
|
}
|
||||||
|
|
||||||
|
while True:
|
||||||
|
if stop_event.is_set():
|
||||||
|
break
|
||||||
|
|
||||||
|
try:
|
||||||
|
connection_id = detection_queue.get(timeout=5)
|
||||||
|
except queue.Empty:
|
||||||
|
continue
|
||||||
|
input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3))
|
||||||
|
|
||||||
|
if input_frame is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# detect and send the output
|
||||||
|
start.value = datetime.datetime.now().timestamp()
|
||||||
|
detections = object_detector.detect_raw(input_frame)
|
||||||
|
duration = datetime.datetime.now().timestamp()-start.value
|
||||||
|
outputs[connection_id]['np'][:] = detections[:]
|
||||||
|
out_events[connection_id].set()
|
||||||
|
start.value = 0.0
|
||||||
|
|
||||||
|
avg_speed.value = (avg_speed.value*9 + duration)/10
|
||||||
|
|
||||||
|
class EdgeTPUProcess():
|
||||||
|
def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
|
||||||
|
self.name = name
|
||||||
|
self.out_events = out_events
|
||||||
|
self.detection_queue = detection_queue
|
||||||
|
self.avg_inference_speed = mp.Value('d', 0.01)
|
||||||
|
self.detection_start = mp.Value('d', 0.0)
|
||||||
|
self.detect_process = None
|
||||||
|
self.model_shape = model_shape
|
||||||
|
self.tf_device = tf_device
|
||||||
|
self.num_threads = num_threads
|
||||||
|
self.start_or_restart()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self.detect_process.terminate()
|
||||||
|
logging.info("Waiting for detection process to exit gracefully...")
|
||||||
|
self.detect_process.join(timeout=30)
|
||||||
|
if self.detect_process.exitcode is None:
|
||||||
|
logging.info("Detection process didnt exit. Force killing...")
|
||||||
|
self.detect_process.kill()
|
||||||
|
self.detect_process.join()
|
||||||
|
|
||||||
|
def start_or_restart(self):
|
||||||
|
self.detection_start.value = 0.0
|
||||||
|
if (not self.detect_process is None) and self.detect_process.is_alive():
|
||||||
|
self.stop()
|
||||||
|
self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
|
||||||
|
self.detect_process.daemon = True
|
||||||
|
self.detect_process.start()
|
||||||
|
|
||||||
|
class RemoteObjectDetector():
|
||||||
|
def __init__(self, name, labels, detection_queue, event, model_shape):
|
||||||
|
self.labels = load_labels(labels)
|
||||||
|
self.name = name
|
||||||
|
self.fps = EventsPerSecond()
|
||||||
|
self.detection_queue = detection_queue
|
||||||
|
self.event = event
|
||||||
|
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
|
||||||
|
self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf)
|
||||||
|
self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
|
||||||
|
self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
|
||||||
|
|
||||||
|
def detect(self, tensor_input, threshold=.4):
|
||||||
|
detections = []
|
||||||
|
|
||||||
|
# copy input to shared memory
|
||||||
|
self.np_shm[:] = tensor_input[:]
|
||||||
|
self.event.clear()
|
||||||
|
self.detection_queue.put(self.name)
|
||||||
|
result = self.event.wait(timeout=10.0)
|
||||||
|
|
||||||
|
# if it timed out
|
||||||
|
if result is None:
|
||||||
|
return detections
|
||||||
|
|
||||||
|
for d in self.out_np_shm:
|
||||||
|
if d[1] < threshold:
|
||||||
|
break
|
||||||
|
detections.append((
|
||||||
|
self.labels[int(d[0])],
|
||||||
|
float(d[1]),
|
||||||
|
(d[2], d[3], d[4], d[5])
|
||||||
|
))
|
||||||
|
self.fps.update()
|
||||||
|
return detections
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
self.shm.unlink()
|
||||||
|
self.out_shm.unlink()
|
||||||
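For illustration (this is not part of edgetpu.py above): a minimal sketch of how these pieces fit together, mirroring what `FrigateApp.start_detectors()` does in app.py earlier in this diff. The camera name, label path, and model size are placeholders, and the tflite model files referenced by `LocalObjectDetector` only exist inside the Frigate container.

```python
# Hypothetical wiring of EdgeTPUProcess + RemoteObjectDetector (placeholder values).
import multiprocessing as mp
from multiprocessing import shared_memory

import numpy as np

from frigate.edgetpu import EdgeTPUProcess, RemoteObjectDetector

name = "back_yard"          # placeholder camera name
model_shape = (320, 320)    # placeholder model input size

detection_queue = mp.Queue()
out_events = {name: mp.Event()}

# shared memory used to hand frames to the detector process and read results back
shm_in = shared_memory.SharedMemory(name=name, create=True, size=model_shape[0] * model_shape[1] * 3)
shm_out = shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4)

# start the detector process (tf_device='cpu' falls back to the CPU tflite model)
detector = EdgeTPUProcess(name, detection_queue, out_events, model_shape, tf_device="cpu")

# a camera process then attaches its client side and requests detections
remote = RemoteObjectDetector(name, "/labelmap.txt", detection_queue, out_events[name], model_shape)
frame = np.zeros((1, model_shape[0], model_shape[1], 3), dtype=np.uint8)
print(remote.detect(frame, threshold=0.4))
```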
306
frigate/events.py
Normal file
@@ -0,0 +1,306 @@
|
|||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
import subprocess as sp
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
from collections import defaultdict
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import psutil
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig
|
||||||
|
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
|
||||||
|
from frigate.models import Event
|
||||||
|
|
||||||
|
from peewee import fn
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class EventProcessor(threading.Thread):
|
||||||
|
def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event):
|
||||||
|
threading.Thread.__init__(self)
|
||||||
|
self.name = 'event_processor'
|
||||||
|
self.config = config
|
||||||
|
self.camera_processes = camera_processes
|
||||||
|
self.cached_clips = {}
|
||||||
|
self.event_queue = event_queue
|
||||||
|
self.event_processed_queue = event_processed_queue
|
||||||
|
self.events_in_process = {}
|
||||||
|
self.stop_event = stop_event
|
||||||
|
|
||||||
|
def refresh_cache(self):
|
||||||
|
cached_files = os.listdir(CACHE_DIR)
|
||||||
|
|
||||||
|
files_in_use = []
|
||||||
|
for process in psutil.process_iter():
|
||||||
|
try:
|
||||||
|
if process.name() != 'ffmpeg':
|
||||||
|
continue
|
||||||
|
|
||||||
|
flist = process.open_files()
|
||||||
|
if flist:
|
||||||
|
for nt in flist:
|
||||||
|
if nt.path.startswith(CACHE_DIR):
|
||||||
|
files_in_use.append(nt.path.split('/')[-1])
|
||||||
|
except:
|
||||||
|
continue
|
||||||
|
|
||||||
|
for f in cached_files:
|
||||||
|
if f in files_in_use or f in self.cached_clips:
|
||||||
|
continue
|
||||||
|
|
||||||
|
camera = '-'.join(f.split('-')[:-1])
|
||||||
|
start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
|
||||||
|
|
||||||
|
ffprobe_cmd = " ".join([
|
||||||
|
'ffprobe',
|
||||||
|
'-v',
|
||||||
|
'error',
|
||||||
|
'-show_entries',
|
||||||
|
'format=duration',
|
||||||
|
'-of',
|
||||||
|
'default=noprint_wrappers=1:nokey=1',
|
||||||
|
f"{os.path.join(CACHE_DIR,f)}"
|
||||||
|
])
|
||||||
|
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
|
||||||
|
(output, err) = p.communicate()
|
||||||
|
p_status = p.wait()
|
||||||
|
if p_status == 0:
|
||||||
|
duration = float(output.decode('utf-8').strip())
|
||||||
|
else:
|
||||||
|
logger.info(f"bad file: {f}")
|
||||||
|
os.remove(os.path.join(CACHE_DIR,f))
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.cached_clips[f] = {
|
||||||
|
'path': f,
|
||||||
|
'camera': camera,
|
||||||
|
'start_time': start_time.timestamp(),
|
||||||
|
'duration': duration
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(self.events_in_process) > 0:
|
||||||
|
earliest_event = min(self.events_in_process.values(), key=lambda x:x['start_time'])['start_time']
|
||||||
|
else:
|
||||||
|
earliest_event = datetime.datetime.now().timestamp()
|
||||||
|
|
||||||
|
# if the earliest event exceeds the max seconds, cap it
|
||||||
|
max_seconds = self.config.clips.max_seconds
|
||||||
|
if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
|
||||||
|
earliest_event = datetime.datetime.now().timestamp()-max_seconds
|
||||||
|
|
||||||
|
for f, data in list(self.cached_clips.items()):
|
||||||
|
if earliest_event-90 > data['start_time']+data['duration']:
|
||||||
|
del self.cached_clips[f]
|
||||||
|
os.remove(os.path.join(CACHE_DIR,f))
|
||||||
|
|
||||||
|
def create_clip(self, camera, event_data, pre_capture, post_capture):
|
||||||
|
# get all cached clips for this camera, sorted by start time
|
||||||
|
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
|
||||||
|
|
||||||
|
while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
|
||||||
|
logger.debug(f"No cache clips for {camera}. Waiting...")
|
||||||
|
time.sleep(5)
|
||||||
|
self.refresh_cache()
|
||||||
|
# get all cached clips for this camera, sorted by start time
|
||||||
|
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
|
||||||
|
|
||||||
|
playlist_start = event_data['start_time']-pre_capture
|
||||||
|
playlist_end = event_data['end_time']+post_capture
|
||||||
|
playlist_lines = []
|
||||||
|
for clip in sorted_clips:
|
||||||
|
# clip ends before playlist start time, skip
|
||||||
|
if clip['start_time']+clip['duration'] < playlist_start:
|
||||||
|
continue
|
||||||
|
# clip starts after playlist ends, finish
|
||||||
|
if clip['start_time'] > playlist_end:
|
||||||
|
break
|
||||||
|
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
|
||||||
|
# if this is the starting clip, add an inpoint
|
||||||
|
if clip['start_time'] < playlist_start:
|
||||||
|
playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
|
||||||
|
# if this is the ending clip, add an outpoint
|
||||||
|
if clip['start_time']+clip['duration'] > playlist_end:
|
||||||
|
playlist_lines.append(f"outpoint {int(playlist_end-clip['start_time'])}")
|
||||||
|
|
||||||
|
clip_name = f"{camera}-{event_data['id']}"
|
||||||
|
ffmpeg_cmd = [
|
||||||
|
'ffmpeg',
|
||||||
|
'-y',
|
||||||
|
'-protocol_whitelist',
|
||||||
|
'pipe,file',
|
||||||
|
'-f',
|
||||||
|
'concat',
|
||||||
|
'-safe',
|
||||||
|
'0',
|
||||||
|
'-i',
|
||||||
|
'-',
|
||||||
|
'-c',
|
||||||
|
'copy',
|
||||||
|
'-movflags',
|
||||||
|
'+faststart',
|
||||||
|
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
|
||||||
|
]
|
||||||
|
|
||||||
|
p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
|
||||||
|
if p.returncode != 0:
|
||||||
|
logger.error(p.stderr)
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
while True:
|
||||||
|
if self.stop_event.is_set():
|
||||||
|
logger.info(f"Exiting event processor...")
|
||||||
|
break
|
||||||
|
|
||||||
|
try:
|
||||||
|
event_type, camera, event_data = self.event_queue.get(timeout=10)
|
||||||
|
except queue.Empty:
|
||||||
|
if not self.stop_event.is_set():
|
||||||
|
self.refresh_cache()
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
|
||||||
|
self.refresh_cache()
|
||||||
|
|
||||||
|
if event_type == 'start':
|
||||||
|
self.events_in_process[event_data['id']] = event_data
|
||||||
|
|
||||||
|
if event_type == 'end':
|
||||||
|
clips_config = self.config.cameras[camera].clips
|
||||||
|
|
||||||
|
if not event_data['false_positive']:
|
||||||
|
clip_created = False
|
||||||
|
if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
|
||||||
|
clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
|
||||||
|
|
||||||
|
Event.create(
|
||||||
|
id=event_data['id'],
|
||||||
|
label=event_data['label'],
|
||||||
|
camera=camera,
|
||||||
|
start_time=event_data['start_time'],
|
||||||
|
end_time=event_data['end_time'],
|
||||||
|
top_score=event_data['top_score'],
|
||||||
|
false_positive=event_data['false_positive'],
|
||||||
|
zones=list(event_data['entered_zones']),
|
||||||
|
thumbnail=event_data['thumbnail'],
|
||||||
|
has_clip=clip_created,
|
||||||
|
has_snapshot=event_data['has_snapshot'],
|
||||||
|
)
|
||||||
|
del self.events_in_process[event_data['id']]
|
||||||
|
self.event_processed_queue.put((event_data['id'], camera))
|
||||||
|
|
||||||
|
class EventCleanup(threading.Thread):
|
||||||
|
def __init__(self, config: FrigateConfig, stop_event):
|
||||||
|
threading.Thread.__init__(self)
|
||||||
|
self.name = 'event_cleanup'
|
||||||
|
self.config = config
|
||||||
|
self.stop_event = stop_event
|
||||||
|
self.camera_keys = list(self.config.cameras.keys())
|
||||||
|
|
||||||
|
def expire(self, media):
|
||||||
|
## Expire events from unlisted cameras based on the global config
|
||||||
|
if media == 'clips':
|
||||||
|
retain_config = self.config.clips.retain
|
||||||
|
file_extension = 'mp4'
|
||||||
|
update_params = {'has_clip': False}
|
||||||
|
else:
|
||||||
|
retain_config = self.config.snapshots.retain
|
||||||
|
file_extension = 'jpg'
|
||||||
|
update_params = {'has_snapshot': False}
|
||||||
|
|
||||||
|
distinct_labels = (Event.select(Event.label)
|
||||||
|
.where(Event.camera.not_in(self.camera_keys))
|
||||||
|
.distinct())
|
||||||
|
|
||||||
|
# loop over object types in db
|
||||||
|
for l in distinct_labels:
|
||||||
|
# get expiration time for this label
|
||||||
|
expire_days = retain_config.objects.get(l.label, retain_config.default)
|
||||||
|
expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
|
||||||
|
# grab all events after specific time
|
||||||
|
expired_events = (
|
||||||
|
Event.select()
|
||||||
|
.where(Event.camera.not_in(self.camera_keys),
|
||||||
|
Event.start_time < expire_after,
|
||||||
|
Event.label == l.label)
|
||||||
|
)
|
||||||
|
# delete the media from disk
|
||||||
|
for event in expired_events:
|
||||||
|
media_name = f"{event.camera}-{event.id}"
|
||||||
|
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
|
||||||
|
media.unlink(missing_ok=True)
|
||||||
|
# update the clips attribute for the db entry
|
||||||
|
update_query = (
|
||||||
|
Event.update(update_params)
|
||||||
|
.where(Event.camera.not_in(self.camera_keys),
|
||||||
|
Event.start_time < expire_after,
|
||||||
|
Event.label == l.label)
|
||||||
|
)
|
||||||
|
update_query.execute()
|
||||||
|
|
||||||
|
## Expire events from cameras based on the camera config
|
||||||
|
for name, camera in self.config.cameras.items():
|
||||||
|
if media == 'clips':
|
||||||
|
retain_config = camera.clips.retain
|
||||||
|
else:
|
||||||
|
retain_config = camera.snapshots.retain
|
||||||
|
# get distinct objects in database for this camera
|
||||||
|
distinct_labels = (Event.select(Event.label)
|
||||||
|
.where(Event.camera == name)
|
||||||
|
.distinct())
|
||||||
|
|
||||||
|
# loop over object types in db
|
||||||
|
for l in distinct_labels:
|
||||||
|
# get expiration time for this label
|
||||||
|
expire_days = retain_config.objects.get(l.label, retain_config.default)
|
||||||
|
expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
|
||||||
|
# grab all events after specific time
|
||||||
|
expired_events = (
|
||||||
|
Event.select()
|
||||||
|
.where(Event.camera == name,
|
||||||
|
Event.start_time < expire_after,
|
||||||
|
Event.label == l.label)
|
||||||
|
)
|
||||||
|
# delete the grabbed clips from disk
|
||||||
|
for event in expired_events:
|
||||||
|
media_name = f"{event.camera}-{event.id}"
|
||||||
|
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
|
||||||
|
media.unlink(missing_ok=True)
|
||||||
|
# update the clips attribute for the db entry
|
||||||
|
update_query = (
|
||||||
|
Event.update(update_params)
|
||||||
|
.where( Event.camera == name,
|
||||||
|
Event.start_time < expire_after,
|
||||||
|
Event.label == l.label)
|
||||||
|
)
|
||||||
|
update_query.execute()
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
counter = 0
|
||||||
|
while(True):
|
||||||
|
if self.stop_event.is_set():
|
||||||
|
logger.info(f"Exiting event cleanup...")
|
||||||
|
break
|
||||||
|
|
||||||
|
# only expire events every 10 minutes, but check for stop events every 10 seconds
|
||||||
|
time.sleep(10)
|
||||||
|
counter = counter + 1
|
||||||
|
if counter < 60:
|
||||||
|
continue
|
||||||
|
counter = 0
|
||||||
|
|
||||||
|
self.expire('clips')
|
||||||
|
self.expire('snapshots')
|
||||||
|
|
||||||
|
# drop events from db where has_clip and has_snapshot are false
|
||||||
|
delete_query = (
|
||||||
|
Event.delete()
|
||||||
|
.where( Event.has_clip == False,
|
||||||
|
Event.has_snapshot == False)
|
||||||
|
)
|
||||||
|
delete_query.execute()
|
||||||
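A side note on the `create_clip` method above (illustrative, not part of events.py): the text it pipes to ffmpeg is the concat demuxer's playlist format, with `inpoint`/`outpoint` trimming the first and last cached segments to the pre/post capture window. The file names and offsets below are made up.

```python
# Made-up example of the playlist text that create_clip() builds and pipes to
# `ffmpeg -f concat -i -`; inpoint/outpoint apply to the preceding file entry.
playlist_lines = [
    "file '/tmp/cache/back_yard-20210101120000.mp4'",
    "inpoint 12",   # start partway into the first segment (pre_capture window)
    "file '/tmp/cache/back_yard-20210101120100.mp4'",
    "file '/tmp/cache/back_yard-20210101120200.mp4'",
    "outpoint 45",  # stop partway through the last segment (post_capture window)
]
print("\n".join(playlist_lines))
```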
268
frigate/http.py
Normal file
@@ -0,0 +1,268 @@
|
|||||||
|
import base64
|
||||||
|
import datetime
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from functools import reduce
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
from flask import (Blueprint, Flask, Response, current_app, jsonify,
|
||||||
|
make_response, request)
|
||||||
|
from peewee import SqliteDatabase, operator, fn, DoesNotExist
|
||||||
|
from playhouse.shortcuts import model_to_dict
|
||||||
|
|
||||||
|
from frigate.models import Event
|
||||||
|
from frigate.stats import stats_snapshot
|
||||||
|
from frigate.util import calculate_region
|
||||||
|
from frigate.version import VERSION
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
bp = Blueprint('frigate', __name__)
|
||||||
|
|
||||||
|
def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor):
|
||||||
|
app = Flask(__name__)
|
||||||
|
|
||||||
|
@app.before_request
|
||||||
|
def _db_connect():
|
||||||
|
database.connect()
|
||||||
|
|
||||||
|
@app.teardown_request
|
||||||
|
def _db_close(exc):
|
||||||
|
if not database.is_closed():
|
||||||
|
database.close()
|
||||||
|
|
||||||
|
app.frigate_config = frigate_config
|
||||||
|
app.stats_tracking = stats_tracking
|
||||||
|
app.detected_frames_processor = detected_frames_processor
|
||||||
|
|
||||||
|
app.register_blueprint(bp)
|
||||||
|
|
||||||
|
return app
|
||||||
|
|
||||||
|
@bp.route('/')
|
||||||
|
def is_healthy():
|
||||||
|
return "Frigate is running. Alive and healthy!"
|
||||||
|
|
||||||
|
@bp.route('/events/summary')
|
||||||
|
def events_summary():
|
||||||
|
has_clip = request.args.get('has_clip', type=int)
|
||||||
|
has_snapshot = request.args.get('has_snapshot', type=int)
|
||||||
|
|
||||||
|
clauses = []
|
||||||
|
|
||||||
|
if not has_clip is None:
|
||||||
|
clauses.append((Event.has_clip == has_clip))
|
||||||
|
|
||||||
|
if not has_snapshot is None:
|
||||||
|
clauses.append((Event.has_snapshot == has_snapshot))
|
||||||
|
|
||||||
|
if len(clauses) == 0:
|
||||||
|
clauses.append((1 == 1))
|
||||||
|
|
||||||
|
groups = (
|
||||||
|
Event
|
||||||
|
.select(
|
||||||
|
Event.camera,
|
||||||
|
Event.label,
|
||||||
|
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
|
||||||
|
Event.zones,
|
||||||
|
fn.COUNT(Event.id).alias('count')
|
||||||
|
)
|
||||||
|
.where(reduce(operator.and_, clauses))
|
||||||
|
.group_by(
|
||||||
|
Event.camera,
|
||||||
|
Event.label,
|
||||||
|
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
|
||||||
|
Event.zones
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return jsonify([e for e in groups.dicts()])
|
||||||
|
|
||||||
|
@bp.route('/events/<id>')
|
||||||
|
def event(id):
|
||||||
|
try:
|
||||||
|
return model_to_dict(Event.get(Event.id == id))
|
||||||
|
except DoesNotExist:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
@bp.route('/events/<id>/thumbnail.jpg')
|
||||||
|
def event_snapshot(id):
|
||||||
|
format = request.args.get('format', 'ios')
|
||||||
|
thumbnail_bytes = None
|
||||||
|
try:
|
||||||
|
event = Event.get(Event.id == id)
|
||||||
|
thumbnail_bytes = base64.b64decode(event.thumbnail)
|
||||||
|
except DoesNotExist:
|
||||||
|
# see if the object is currently being tracked
|
||||||
|
try:
|
||||||
|
for camera_state in current_app.detected_frames_processor.camera_states.values():
|
||||||
|
if id in camera_state.tracked_objects:
|
||||||
|
tracked_obj = camera_state.tracked_objects.get(id)
|
||||||
|
if not tracked_obj is None:
|
||||||
|
thumbnail_bytes = tracked_obj.get_jpg_bytes()
|
||||||
|
except:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
if thumbnail_bytes is None:
|
||||||
|
return "Event not found", 404
|
||||||
|
|
||||||
|
# android notifications prefer a 2:1 ratio
|
||||||
|
if format == 'android':
|
||||||
|
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
|
||||||
|
img = cv2.imdecode(jpg_as_np, flags=1)
|
||||||
|
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
|
||||||
|
ret, jpg = cv2.imencode('.jpg', thumbnail)
|
||||||
|
thumbnail_bytes = jpg.tobytes()
|
||||||
|
|
||||||
|
response = make_response(thumbnail_bytes)
|
||||||
|
response.headers['Content-Type'] = 'image/jpg'
|
||||||
|
return response
|
||||||
|
|
||||||
|
@bp.route('/events')
|
||||||
|
def events():
|
||||||
|
limit = request.args.get('limit', 100)
|
||||||
|
camera = request.args.get('camera')
|
||||||
|
label = request.args.get('label')
|
||||||
|
zone = request.args.get('zone')
|
||||||
|
after = request.args.get('after', type=int)
|
||||||
|
before = request.args.get('before', type=int)
|
||||||
|
has_clip = request.args.get('has_clip', type=int)
|
||||||
|
has_snapshot = request.args.get('has_snapshot', type=int)
|
||||||
|
|
||||||
|
clauses = []
|
||||||
|
|
||||||
|
if camera:
|
||||||
|
clauses.append((Event.camera == camera))
|
||||||
|
|
||||||
|
if label:
|
||||||
|
clauses.append((Event.label == label))
|
||||||
|
|
||||||
|
if zone:
|
||||||
|
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
|
||||||
|
|
||||||
|
if after:
|
||||||
|
clauses.append((Event.start_time >= after))
|
||||||
|
|
||||||
|
if before:
|
||||||
|
clauses.append((Event.start_time <= before))
|
||||||
|
|
||||||
|
if not has_clip is None:
|
||||||
|
clauses.append((Event.has_clip == has_clip))
|
||||||
|
|
||||||
|
if not has_snapshot is None:
|
||||||
|
clauses.append((Event.has_snapshot == has_snapshot))
|
||||||
|
|
||||||
|
if len(clauses) == 0:
|
||||||
|
clauses.append((1 == 1))
|
||||||
|
|
||||||
|
events = (Event.select()
|
||||||
|
.where(reduce(operator.and_, clauses))
|
||||||
|
.order_by(Event.start_time.desc())
|
||||||
|
.limit(limit))
|
||||||
|
|
||||||
|
return jsonify([model_to_dict(e) for e in events])
|
||||||
|
|
||||||
|
@bp.route('/config')
|
||||||
|
def config():
|
||||||
|
return jsonify(current_app.frigate_config.to_dict())
|
||||||
|
|
||||||
|
@bp.route('/version')
|
||||||
|
def version():
|
||||||
|
return VERSION
|
||||||
|
|
||||||
|
@bp.route('/stats')
|
||||||
|
def stats():
|
||||||
|
stats = stats_snapshot(current_app.stats_tracking)
|
||||||
|
return jsonify(stats)
|
||||||
|
|
||||||
|
@bp.route('/<camera_name>/<label>/best.jpg')
|
||||||
|
def best(camera_name, label):
|
||||||
|
if camera_name in current_app.frigate_config.cameras:
|
||||||
|
best_object = current_app.detected_frames_processor.get_best(camera_name, label)
|
||||||
|
best_frame = best_object.get('frame')
|
||||||
|
if best_frame is None:
|
||||||
|
best_frame = np.zeros((720,1280,3), np.uint8)
|
||||||
|
else:
|
||||||
|
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
|
||||||
|
|
||||||
|
crop = bool(request.args.get('crop', 0, type=int))
|
||||||
|
if crop:
|
||||||
|
box = best_object.get('box', (0,0,300,300))
|
||||||
|
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
|
||||||
|
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
|
||||||
|
|
||||||
|
height = int(request.args.get('h', str(best_frame.shape[0])))
|
||||||
|
width = int(height*best_frame.shape[1]/best_frame.shape[0])
|
||||||
|
|
||||||
|
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||||
|
ret, jpg = cv2.imencode('.jpg', best_frame)
|
||||||
|
response = make_response(jpg.tobytes())
|
||||||
|
response.headers['Content-Type'] = 'image/jpg'
|
||||||
|
return response
|
||||||
|
else:
|
||||||
|
return "Camera named {} not found".format(camera_name), 404
|
||||||
|
|
||||||
|
@bp.route('/<camera_name>')
|
||||||
|
def mjpeg_feed(camera_name):
|
||||||
|
fps = int(request.args.get('fps', '3'))
|
||||||
|
height = int(request.args.get('h', '360'))
|
||||||
|
draw_options = {
|
||||||
|
'bounding_boxes': request.args.get('bbox', type=int),
|
||||||
|
'timestamp': request.args.get('timestamp', type=int),
|
||||||
|
'zones': request.args.get('zones', type=int),
|
||||||
|
'mask': request.args.get('mask', type=int),
|
||||||
|
'motion_boxes': request.args.get('motion', type=int),
|
||||||
|
'regions': request.args.get('regions', type=int),
|
||||||
|
}
|
||||||
|
if camera_name in current_app.frigate_config.cameras:
|
||||||
|
# return a multipart response
|
||||||
|
return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
|
||||||
|
mimetype='multipart/x-mixed-replace; boundary=frame')
|
||||||
|
else:
|
||||||
|
return "Camera named {} not found".format(camera_name), 404
|
||||||
|
|
||||||
|
@bp.route('/<camera_name>/latest.jpg')
|
||||||
|
def latest_frame(camera_name):
|
||||||
|
draw_options = {
|
||||||
|
'bounding_boxes': request.args.get('bbox', type=int),
|
||||||
|
'timestamp': request.args.get('timestamp', type=int),
|
||||||
|
'zones': request.args.get('zones', type=int),
|
||||||
|
'mask': request.args.get('mask', type=int),
|
||||||
|
'motion_boxes': request.args.get('motion', type=int),
|
||||||
|
'regions': request.args.get('regions', type=int),
|
||||||
|
}
|
||||||
|
if camera_name in current_app.frigate_config.cameras:
|
||||||
|
# return the most recent frame for this camera
|
||||||
|
frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
|
||||||
|
if frame is None:
|
||||||
|
frame = np.zeros((720,1280,3), np.uint8)
|
||||||
|
|
||||||
|
height = int(request.args.get('h', str(frame.shape[0])))
|
||||||
|
width = int(height*frame.shape[1]/frame.shape[0])
|
||||||
|
|
||||||
|
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||||
|
|
||||||
|
ret, jpg = cv2.imencode('.jpg', frame)
|
||||||
|
response = make_response(jpg.tobytes())
|
||||||
|
response.headers['Content-Type'] = 'image/jpg'
|
||||||
|
return response
|
||||||
|
else:
|
||||||
|
return "Camera named {} not found".format(camera_name), 404
|
||||||
|
|
||||||
|
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
|
||||||
|
while True:
|
||||||
|
# max out at specified FPS
|
||||||
|
time.sleep(1/fps)
|
||||||
|
frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
|
||||||
|
if frame is None:
|
||||||
|
frame = np.zeros((height,int(height*16/9),3), np.uint8)
|
||||||
|
|
||||||
|
width = int(height*frame.shape[1]/frame.shape[0])
|
||||||
|
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
|
||||||
|
|
||||||
|
ret, jpg = cv2.imencode('.jpg', frame)
|
||||||
|
yield (b'--frame\r\n'
|
||||||
|
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
|
||||||
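For illustration (not part of http.py): querying the `/events` endpoint defined above from Python. The base URL is an assumption — adjust it to wherever Frigate's API is exposed in your deployment.

```python
# Hypothetical client for the /events endpoint above; base URL is an assumption.
import requests

BASE_URL = "http://localhost:5000"

# last 10 person events with a saved clip on a camera named "back_yard"
resp = requests.get(
    f"{BASE_URL}/events",
    params={"camera": "back_yard", "label": "person", "has_clip": 1, "limit": 10},
)
resp.raise_for_status()
events = resp.json()
for event in events:
    print(event["id"], event["label"], event["start_time"], event["top_score"])

# thumbnail for the most recent event, padded to 2:1 for Android notifications
if events:
    thumb = requests.get(
        f"{BASE_URL}/events/{events[0]['id']}/thumbnail.jpg",
        params={"format": "android"},
    )
    with open("thumbnail.jpg", "wb") as f:
        f.write(thumb.content)
```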
77
frigate/log.py
Normal file
@@ -0,0 +1,77 @@
# adapted from https://medium.com/@jonathonbao/python3-logging-with-multiprocessing-f51f460b8778
import logging
import threading
import os
import signal
import queue
import multiprocessing as mp
from logging import handlers
from setproctitle import setproctitle


def listener_configurer():
    root = logging.getLogger()
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter('%(name)-30s %(levelname)-8s: %(message)s')
    console_handler.setFormatter(formatter)
    root.addHandler(console_handler)
    root.setLevel(logging.INFO)

def root_configurer(queue):
    h = handlers.QueueHandler(queue)
    root = logging.getLogger()
    root.addHandler(h)
    root.setLevel(logging.INFO)

def log_process(log_queue):
    stop_event = mp.Event()
    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"logger"
    setproctitle("frigate.logger")
    listener_configurer()
    while True:
        if stop_event.is_set() and log_queue.empty():
            break
        try:
            record = log_queue.get(timeout=5)
        except queue.Empty:
            continue
        logger = logging.getLogger(record.name)
        logger.handle(record)

# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
    def __init__(self, log_name, level):
        """Setup the object with a logger and a loglevel
        and start the thread
        """
        threading.Thread.__init__(self)
        self.daemon = False
        self.logger = logging.getLogger(log_name)
        self.level = level
        self.fdRead, self.fdWrite = os.pipe()
        self.pipeReader = os.fdopen(self.fdRead)
        self.start()

    def fileno(self):
        """Return the write file descriptor of the pipe
        """
        return self.fdWrite

    def run(self):
        """Run the thread, logging everything.
        """
        for line in iter(self.pipeReader.readline, ''):
            self.logger.log(self.level, line.strip('\n'))

        self.pipeReader.close()

    def close(self):
        """Close the write end of the pipe.
        """
        os.close(self.fdWrite)
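For context, a minimal sketch of how a worker process hands its log records to `log_process` through the shared queue; the worker body and queue wiring are illustrative and assume `frigate.log` is importable.

```python
import logging
import multiprocessing as mp

from frigate.log import log_process, root_configurer

def worker(log_queue):
    # inside the child process, route every record through the shared queue
    root_configurer(log_queue)
    logging.getLogger("demo.worker").info("hello from the worker")

if __name__ == "__main__":
    log_queue = mp.Queue(-1)
    listener = mp.Process(target=log_process, args=(log_queue,), name="log_process")
    listener.start()

    p = mp.Process(target=worker, args=(log_queue,))
    p.start()
    p.join()

    # log_process traps SIGTERM, drains the queue, then exits
    listener.terminate()
    listener.join()
```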
16
frigate/models.py
Normal file
@@ -0,0 +1,16 @@
from peewee import *
from playhouse.sqlite_ext import *


class Event(Model):
    id = CharField(null=False, primary_key=True, max_length=30)
    label = CharField(index=True, max_length=20)
    camera = CharField(index=True, max_length=20)
    start_time = DateTimeField()
    end_time = DateTimeField()
    top_score = FloatField()
    false_positive = BooleanField()
    zones = JSONField()
    thumbnail = TextField()
    has_clip = BooleanField(default=True)
    has_snapshot = BooleanField(default=True)
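A minimal sketch of using the `Event` model outside of Frigate: bind it to a SQLite database, insert a row, and query it back. The database path, the explicit `bind()` call, and the sample values are assumptions for illustration only.

```python
from playhouse.sqlite_ext import SqliteExtDatabase

from frigate.models import Event

db = SqliteExtDatabase('/tmp/frigate_example.db')
db.bind([Event])
db.create_tables([Event], safe=True)

Event.insert(
    id='1609459200.0-abc123',
    label='person',
    camera='back_yard',
    start_time=1609459200.0,
    end_time=1609459230.5,
    top_score=0.92,
    false_positive=False,
    zones=['driveway'],
    thumbnail='',            # base64 jpeg in practice
    has_clip=True,
    has_snapshot=True,
).execute()

# query recent person events for one camera
for event in (Event.select()
              .where((Event.camera == 'back_yard') & (Event.label == 'person'))
              .order_by(Event.start_time.desc())
              .limit(5)):
    print(event.id, event.top_score)
```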
85
frigate/motion.py
Normal file
@@ -0,0 +1,85 @@
import cv2
import imutils
import numpy as np
from frigate.config import MotionConfig


class MotionDetector():
    def __init__(self, frame_shape, config: MotionConfig):
        self.config = config
        self.frame_shape = frame_shape
        self.resize_factor = frame_shape[0]/config.frame_height
        self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
        self.avg_frame = np.zeros(self.motion_frame_size, np.float)
        self.avg_delta = np.zeros(self.motion_frame_size, np.float)
        self.motion_frame_count = 0
        self.frame_counter = 0
        resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
        self.mask = np.where(resized_mask==[0])

    def detect(self, frame):
        motion_boxes = []

        gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]

        # resize frame
        resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)

        # TODO: can I improve the contrast of the grayscale image here?

        # convert to grayscale
        # resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)

        # mask frame
        resized_frame[self.mask] = [255]

        # it takes ~30 frames to establish a baseline
        # dont bother looking for motion
        if self.frame_counter < 30:
            self.frame_counter += 1
        else:
            # compare to average
            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))

            # compute the average delta over the past few frames
            # higher values mean the current frame impacts the delta a lot, and a single raindrop may
            # register as motion, too low and a fast moving person wont be detected as motion
            cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)

            # compute the threshold image for the current frame
            # TODO: threshold
            current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]

            # black out everything in the avg_delta where there isnt motion in the current frame
            avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
            avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)

            # then look for deltas above the threshold, but only in areas where there is a delta
            # in the current frame. this prevents deltas from previous frames from being included
            thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over the contours
            for c in cnts:
                # if the contour is big enough, count it as motion
                contour_area = cv2.contourArea(c)
                if contour_area > self.config.contour_area:
                    x, y, w, h = cv2.boundingRect(c)
                    motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))

        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            if self.motion_frame_count >= 10:
                # only average in the current frame if the difference persists for a bit
                cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
            self.motion_frame_count = 0

        return motion_boxes
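A sketch of driving `MotionDetector` on a clip outside of Frigate. The `SimpleNamespace` stands in for `MotionConfig`, the tuning values are illustrative defaults (not Frigate's), and the grayscale conversion approximates the luma plane Frigate normally passes in.

```python
from types import SimpleNamespace

import cv2
import numpy as np

from frigate.motion import MotionDetector

frame_shape = (720, 1280)  # (height, width) of the decoded frames
config = SimpleNamespace(
    frame_height=80,                               # motion runs on a shrunken frame
    mask=np.full(frame_shape, 255, np.uint8),      # 255 everywhere = nothing masked out
    delta_alpha=0.2,
    threshold=25,
    contour_area=100,
    frame_alpha=0.2,
)

detector = MotionDetector(frame_shape, config)

cap = cv2.VideoCapture("driveway_clip.mp4")  # hypothetical test clip
while True:
    ret, bgr = cap.read()
    if not ret:
        break
    # Frigate hands the detector the luma plane of a YUV420 frame; a plain
    # grayscale conversion is an acceptable stand-in here.
    gray = cv2.cvtColor(cv2.resize(bgr, (frame_shape[1], frame_shape[0])), cv2.COLOR_BGR2GRAY)
    boxes = detector.detect(gray)
    if boxes:
        print("motion:", boxes)
cap.release()
```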
144
frigate/mqtt.py
@@ -1,33 +1,125 @@
|
|||||||
import json
|
import logging
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
class MqttObjectPublisher(threading.Thread):
|
import paho.mqtt.client as mqtt
|
||||||
def __init__(self, client, topic_prefix, objects_parsed, detected_objects):
|
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self.client = client
|
|
||||||
self.topic_prefix = topic_prefix
|
|
||||||
self.objects_parsed = objects_parsed
|
|
||||||
self._detected_objects = detected_objects
|
|
||||||
|
|
||||||
def run(self):
|
from frigate.config import FrigateConfig
|
||||||
last_sent_payload = ""
|
|
||||||
while True:
|
|
||||||
|
|
||||||
# initialize the payload
|
logger = logging.getLogger(__name__)
|
||||||
payload = {}
|
|
||||||
|
|
||||||
# wait until objects have been parsed
|
def create_mqtt_client(config: FrigateConfig, camera_metrics):
|
||||||
with self.objects_parsed:
|
mqtt_config = config.mqtt
|
||||||
self.objects_parsed.wait()
|
|
||||||
|
|
||||||
# add all the person scores in detected objects
|
def on_clips_command(client, userdata, message):
|
||||||
detected_objects = self._detected_objects.copy()
|
payload = message.payload.decode()
|
||||||
person_score = sum([obj['score'] for obj in detected_objects if obj['name'] == 'person'])
|
logger.debug(f"on_clips_toggle: {message.topic} {payload}")
|
||||||
# if the person score is more than 100, set person to ON
|
|
||||||
payload['person'] = 'ON' if int(person_score*100) > 100 else 'OFF'
|
|
||||||
|
|
||||||
# send message for objects if different
|
camera_name = message.topic.split('/')[-3]
|
||||||
new_payload = json.dumps(payload, sort_keys=True)
|
|
||||||
if new_payload != last_sent_payload:
|
clips_settings = config.cameras[camera_name].clips
|
||||||
last_sent_payload = new_payload
|
|
||||||
self.client.publish(self.topic_prefix+'/objects', new_payload, retain=False)
|
if payload == 'ON':
|
||||||
|
if not clips_settings.enabled:
|
||||||
|
logger.info(f"Turning on clips for {camera_name} via mqtt")
|
||||||
|
clips_settings._enabled = True
|
||||||
|
elif payload == 'OFF':
|
||||||
|
if clips_settings.enabled:
|
||||||
|
logger.info(f"Turning off clips for {camera_name} via mqtt")
|
||||||
|
clips_settings._enabled = False
|
||||||
|
else:
|
||||||
|
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
|
||||||
|
|
||||||
|
state_topic = f"{message.topic[:-4]}/state"
|
||||||
|
client.publish(state_topic, payload, retain=True)
|
||||||
|
|
||||||
|
def on_snapshots_command(client, userdata, message):
|
||||||
|
payload = message.payload.decode()
|
||||||
|
logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
|
||||||
|
|
||||||
|
camera_name = message.topic.split('/')[-3]
|
||||||
|
|
||||||
|
snapshots_settings = config.cameras[camera_name].snapshots
|
||||||
|
|
||||||
|
if payload == 'ON':
|
||||||
|
if not snapshots_settings.enabled:
|
||||||
|
logger.info(f"Turning on snapshots for {camera_name} via mqtt")
|
||||||
|
snapshots_settings._enabled = True
|
||||||
|
elif payload == 'OFF':
|
||||||
|
if snapshots_settings.enabled:
|
||||||
|
logger.info(f"Turning off snapshots for {camera_name} via mqtt")
|
||||||
|
snapshots_settings._enabled = False
|
||||||
|
else:
|
||||||
|
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
|
||||||
|
|
||||||
|
state_topic = f"{message.topic[:-4]}/state"
|
||||||
|
client.publish(state_topic, payload, retain=True)
|
||||||
|
|
||||||
|
def on_detect_command(client, userdata, message):
|
||||||
|
payload = message.payload.decode()
|
||||||
|
logger.debug(f"on_detect_toggle: {message.topic} {payload}")
|
||||||
|
|
||||||
|
camera_name = message.topic.split('/')[-3]
|
||||||
|
|
||||||
|
detect_settings = config.cameras[camera_name].detect
|
||||||
|
|
||||||
|
if payload == 'ON':
|
||||||
|
if not camera_metrics[camera_name]["detection_enabled"].value:
|
||||||
|
logger.info(f"Turning on detection for {camera_name} via mqtt")
|
||||||
|
camera_metrics[camera_name]["detection_enabled"].value = True
|
||||||
|
detect_settings._enabled = True
|
||||||
|
elif payload == 'OFF':
|
||||||
|
if camera_metrics[camera_name]["detection_enabled"].value:
|
||||||
|
logger.info(f"Turning off detection for {camera_name} via mqtt")
|
||||||
|
camera_metrics[camera_name]["detection_enabled"].value = False
|
||||||
|
detect_settings._enabled = False
|
||||||
|
else:
|
||||||
|
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
|
||||||
|
|
||||||
|
state_topic = f"{message.topic[:-4]}/state"
|
||||||
|
client.publish(state_topic, payload, retain=True)
|
||||||
|
|
||||||
|
def on_connect(client, userdata, flags, rc):
|
||||||
|
threading.current_thread().name = "mqtt"
|
||||||
|
if rc != 0:
|
||||||
|
if rc == 3:
|
||||||
|
logger.error("MQTT Server unavailable")
|
||||||
|
elif rc == 4:
|
||||||
|
logger.error("MQTT Bad username or password")
|
||||||
|
elif rc == 5:
|
||||||
|
logger.error("MQTT Not authorized")
|
||||||
|
else:
|
||||||
|
logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
|
||||||
|
|
||||||
|
logger.info("MQTT connected")
|
||||||
|
client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
|
||||||
|
|
||||||
|
client = mqtt.Client(client_id=mqtt_config.client_id)
|
||||||
|
client.on_connect = on_connect
|
||||||
|
client.will_set(mqtt_config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
|
||||||
|
|
||||||
|
# register callbacks
|
||||||
|
for name in config.cameras.keys():
|
||||||
|
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command)
|
||||||
|
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command)
|
||||||
|
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command)
|
||||||
|
|
||||||
|
if not mqtt_config.user is None:
|
||||||
|
client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
|
||||||
|
try:
|
||||||
|
client.connect(mqtt_config.host, mqtt_config.port, 60)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Unable to connect to MQTT server: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
client.loop_start()
|
||||||
|
|
||||||
|
for name in config.cameras.keys():
|
||||||
|
client.publish(f"{mqtt_config.topic_prefix}/{name}/clips/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
|
||||||
|
client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
|
||||||
|
client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
|
||||||
|
|
||||||
|
client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/set")
|
||||||
|
client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/set")
|
||||||
|
client.subscribe(f"{mqtt_config.topic_prefix}/+/detect/set")
|
||||||
|
|
||||||
|
return client
|
||||||
|
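A sketch of the other side of the `detect` toggle handled above: a plain paho-mqtt client publishing to the `.../detect/set` command topic and watching the retained `.../detect/state` topic. The broker host, topic prefix, and camera name are placeholders.

```python
import time

import paho.mqtt.client as mqtt

TOPIC_PREFIX = "frigate"
CAMERA = "back_yard"

def on_connect(client, userdata, flags, rc):
    # watch the retained state topic that create_mqtt_client publishes back to
    client.subscribe(f"{TOPIC_PREFIX}/{CAMERA}/detect/state")

def on_message(client, userdata, message):
    print(f"{message.topic}: {message.payload.decode()}")

client = mqtt.Client(client_id="frigate-toggler")
client.on_connect = on_connect
client.on_message = on_message
client.connect("mqtt.local", 1883, 60)
client.loop_start()

# ask Frigate to turn detection off, then back on
client.publish(f"{TOPIC_PREFIX}/{CAMERA}/detect/set", "OFF")
time.sleep(2)
client.publish(f"{TOPIC_PREFIX}/{CAMERA}/detect/set", "ON")
time.sleep(2)

client.loop_stop()
client.disconnect()
```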
|||||||
@@ -1,112 +0,0 @@
|
|||||||
import datetime
|
|
||||||
import time
|
|
||||||
import cv2
|
|
||||||
import threading
|
|
||||||
import numpy as np
|
|
||||||
from edgetpu.detection.engine import DetectionEngine
|
|
||||||
from . util import tonumpyarray
|
|
||||||
|
|
||||||
# Path to frozen detection graph. This is the actual model that is used for the object detection.
|
|
||||||
PATH_TO_CKPT = '/frozen_inference_graph.pb'
|
|
||||||
# List of the strings that is used to add correct label for each box.
|
|
||||||
PATH_TO_LABELS = '/label_map.pbtext'
|
|
||||||
|
|
||||||
# Function to read labels from text files.
|
|
||||||
def ReadLabelFile(file_path):
|
|
||||||
with open(file_path, 'r') as f:
|
|
||||||
lines = f.readlines()
|
|
||||||
ret = {}
|
|
||||||
for line in lines:
|
|
||||||
pair = line.strip().split(maxsplit=1)
|
|
||||||
ret[int(pair[0])] = pair[1].strip()
|
|
||||||
return ret
|
|
||||||
|
|
||||||
class PreppedQueueProcessor(threading.Thread):
|
|
||||||
def __init__(self, cameras, prepped_frame_queue):
|
|
||||||
|
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self.cameras = cameras
|
|
||||||
self.prepped_frame_queue = prepped_frame_queue
|
|
||||||
|
|
||||||
# Load the edgetpu engine and labels
|
|
||||||
self.engine = DetectionEngine(PATH_TO_CKPT)
|
|
||||||
self.labels = ReadLabelFile(PATH_TO_LABELS)
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
# process queue...
|
|
||||||
while True:
|
|
||||||
frame = self.prepped_frame_queue.get()
|
|
||||||
|
|
||||||
# Actual detection.
|
|
||||||
objects = self.engine.DetectWithInputTensor(frame['frame'], threshold=frame['region_threshold'], top_k=3)
|
|
||||||
# print(self.engine.get_inference_time())
|
|
||||||
|
|
||||||
# parse and pass detected objects back to the camera
|
|
||||||
parsed_objects = []
|
|
||||||
for obj in objects:
|
|
||||||
box = obj.bounding_box.flatten().tolist()
|
|
||||||
parsed_objects.append({
|
|
||||||
'frame_time': frame['frame_time'],
|
|
||||||
'name': str(self.labels[obj.label_id]),
|
|
||||||
'score': float(obj.score),
|
|
||||||
'xmin': int((box[0] * frame['region_size']) + frame['region_x_offset']),
|
|
||||||
'ymin': int((box[1] * frame['region_size']) + frame['region_y_offset']),
|
|
||||||
'xmax': int((box[2] * frame['region_size']) + frame['region_x_offset']),
|
|
||||||
'ymax': int((box[3] * frame['region_size']) + frame['region_y_offset'])
|
|
||||||
})
|
|
||||||
self.cameras[frame['camera_name']].add_objects(parsed_objects)
|
|
||||||
|
|
||||||
|
|
||||||
# should this be a region class?
|
|
||||||
class FramePrepper(threading.Thread):
|
|
||||||
def __init__(self, camera_name, shared_frame, frame_time, frame_ready,
|
|
||||||
frame_lock,
|
|
||||||
region_size, region_x_offset, region_y_offset, region_threshold,
|
|
||||||
prepped_frame_queue):
|
|
||||||
|
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self.camera_name = camera_name
|
|
||||||
self.shared_frame = shared_frame
|
|
||||||
self.frame_time = frame_time
|
|
||||||
self.frame_ready = frame_ready
|
|
||||||
self.frame_lock = frame_lock
|
|
||||||
self.region_size = region_size
|
|
||||||
self.region_x_offset = region_x_offset
|
|
||||||
self.region_y_offset = region_y_offset
|
|
||||||
self.region_threshold = region_threshold
|
|
||||||
self.prepped_frame_queue = prepped_frame_queue
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
frame_time = 0.0
|
|
||||||
while True:
|
|
||||||
now = datetime.datetime.now().timestamp()
|
|
||||||
|
|
||||||
with self.frame_ready:
|
|
||||||
# if there isnt a frame ready for processing or it is old, wait for a new frame
|
|
||||||
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
|
|
||||||
self.frame_ready.wait()
|
|
||||||
|
|
||||||
# make a copy of the cropped frame
|
|
||||||
with self.frame_lock:
|
|
||||||
cropped_frame = self.shared_frame[self.region_y_offset:self.region_y_offset+self.region_size, self.region_x_offset:self.region_x_offset+self.region_size].copy()
|
|
||||||
frame_time = self.frame_time.value
|
|
||||||
|
|
||||||
# Resize to 300x300 if needed
|
|
||||||
if cropped_frame.shape != (300, 300, 3):
|
|
||||||
cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
|
|
||||||
# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
|
|
||||||
frame_expanded = np.expand_dims(cropped_frame, axis=0)
|
|
||||||
|
|
||||||
# add the frame to the queue
|
|
||||||
if not self.prepped_frame_queue.full():
|
|
||||||
self.prepped_frame_queue.put({
|
|
||||||
'camera_name': self.camera_name,
|
|
||||||
'frame_time': frame_time,
|
|
||||||
'frame': frame_expanded.flatten().copy(),
|
|
||||||
'region_size': self.region_size,
|
|
||||||
'region_threshold': self.region_threshold,
|
|
||||||
'region_x_offset': self.region_x_offset,
|
|
||||||
'region_y_offset': self.region_y_offset
|
|
||||||
})
|
|
||||||
else:
|
|
||||||
print("queue full. moving on")
|
|
||||||
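A small worked example of the coordinate mapping used by the removed `PreppedQueueProcessor` above: the detector returns boxes normalized to the square region, which are scaled back into full-frame pixels with the region size and offsets. The helper name and numbers are illustrative.

```python
def region_box_to_frame(box, region_size, region_x_offset, region_y_offset):
    """box is (xmin, ymin, xmax, ymax) with values in [0, 1] relative to the region."""
    return (
        int(box[0] * region_size + region_x_offset),
        int(box[1] * region_size + region_y_offset),
        int(box[2] * region_size + region_x_offset),
        int(box[3] * region_size + region_y_offset),
    )

# a detection in the middle of a 300px region that starts at (1000, 400)
print(region_box_to_frame((0.25, 0.30, 0.75, 0.90), 300, 1000, 400))
# -> (1075, 490, 1225, 670)
```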
547
frigate/object_processing.py
Normal file
@@ -0,0 +1,547 @@
|
|||||||
|
import copy
|
||||||
|
import base64
|
||||||
|
import datetime
|
||||||
|
import hashlib
|
||||||
|
import itertools
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
from collections import Counter, defaultdict
|
||||||
|
from statistics import mean, median
|
||||||
|
from typing import Callable, Dict
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from frigate.config import FrigateConfig, CameraConfig
|
||||||
|
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
|
||||||
|
from frigate.edgetpu import load_labels
|
||||||
|
from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculate_region
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
PATH_TO_LABELS = '/labelmap.txt'
|
||||||
|
|
||||||
|
LABELS = load_labels(PATH_TO_LABELS)
|
||||||
|
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
|
||||||
|
|
||||||
|
COLOR_MAP = {}
|
||||||
|
for key, val in LABELS.items():
|
||||||
|
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
|
||||||
|
|
||||||
|
def on_edge(box, frame_shape):
|
||||||
|
if (
|
||||||
|
box[0] == 0 or
|
||||||
|
box[1] == 0 or
|
||||||
|
box[2] == frame_shape[1]-1 or
|
||||||
|
box[3] == frame_shape[0]-1
|
||||||
|
):
|
||||||
|
return True
|
||||||
|
|
||||||
|
def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
|
||||||
|
# larger is better
|
||||||
|
# cutoff images are less ideal, but they should also be smaller?
|
||||||
|
# better scores are obviously better too
|
||||||
|
|
||||||
|
# if the new_thumb is on an edge, and the current thumb is not
|
||||||
|
if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# if the score is better by more than 5%
|
||||||
|
if new_obj['score'] > current_thumb['score']+.05:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# if the area is 10% larger
|
||||||
|
if new_obj['area'] > current_thumb['area']*1.1:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
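A quick illustration of the thumbnail heuristic above with hand-made objects (assuming `is_better_thumbnail` is importable from `frigate.object_processing`); the numbers are arbitrary.

```python
from frigate.object_processing import is_better_thumbnail

frame_shape = (720, 1280)  # (height, width)

current = {'box': (100, 100, 200, 300), 'score': 0.80, 'area': 20000}

# higher score by more than 5% and not touching a frame edge -> preferred
candidate = {'box': (120, 110, 220, 310), 'score': 0.88, 'area': 20000}
print(is_better_thumbnail(current, candidate, frame_shape))   # True

# much higher score but cut off on the left edge of the frame -> rejected
clipped = {'box': (0, 110, 220, 310), 'score': 0.95, 'area': 20000}
print(is_better_thumbnail(current, clipped, frame_shape))     # False
```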
||||||
|
class TrackedObject():
|
||||||
|
def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
|
||||||
|
self.obj_data = obj_data
|
||||||
|
self.camera = camera
|
||||||
|
self.camera_config = camera_config
|
||||||
|
self.frame_cache = frame_cache
|
||||||
|
self.current_zones = []
|
||||||
|
self.entered_zones = set()
|
||||||
|
self.false_positive = True
|
||||||
|
self.top_score = self.computed_score = 0.0
|
||||||
|
self.thumbnail_data = None
|
||||||
|
self.frame = None
|
||||||
|
self.previous = self.to_dict()
|
||||||
|
|
||||||
|
# start the score history
|
||||||
|
self.score_history = [self.obj_data['score']]
|
||||||
|
|
||||||
|
def _is_false_positive(self):
|
||||||
|
# once a true positive, always a true positive
|
||||||
|
if not self.false_positive:
|
||||||
|
return False
|
||||||
|
|
||||||
|
threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
|
||||||
|
if self.computed_score < threshold:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def compute_score(self):
|
||||||
|
scores = self.score_history[:]
|
||||||
|
# pad with zeros if you dont have at least 3 scores
|
||||||
|
if len(scores) < 3:
|
||||||
|
scores += [0.0]*(3 - len(scores))
|
||||||
|
return median(scores)
|
||||||
|
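The false-positive logic above reduces to "the median of the recent score history must clear the label's threshold". A standalone rendering of that calculation, with made-up scores:

```python
from statistics import median

def compute_score(score_history):
    scores = score_history[:]
    # pad with zeros if there are not at least 3 scores yet
    if len(scores) < 3:
        scores += [0.0] * (3 - len(scores))
    return median(scores)

print(compute_score([0.9]))                  # 0.0  -> a single strong frame cannot pass
print(compute_score([0.9, 0.85]))            # 0.85 -> two solid frames in a row already can
print(compute_score([0.9, 0.0, 0.8, 0.0]))   # 0.4  -> a flickering detection stays low
```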
|
||||||
|
def update(self, current_frame_time, obj_data):
|
||||||
|
significant_update = False
|
||||||
|
self.obj_data.update(obj_data)
|
||||||
|
# if the object is not in the current frame, add a 0.0 to the score history
|
||||||
|
if self.obj_data['frame_time'] != current_frame_time:
|
||||||
|
self.score_history.append(0.0)
|
||||||
|
else:
|
||||||
|
self.score_history.append(self.obj_data['score'])
|
||||||
|
# only keep the last 10 scores
|
||||||
|
if len(self.score_history) > 10:
|
||||||
|
self.score_history = self.score_history[-10:]
|
||||||
|
|
||||||
|
# calculate if this is a false positive
|
||||||
|
self.computed_score = self.compute_score()
|
||||||
|
if self.computed_score > self.top_score:
|
||||||
|
self.top_score = self.computed_score
|
||||||
|
self.false_positive = self._is_false_positive()
|
||||||
|
|
||||||
|
if not self.false_positive:
|
||||||
|
# determine if this frame is a better thumbnail
|
||||||
|
if (
|
||||||
|
self.thumbnail_data is None
|
||||||
|
or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
|
||||||
|
):
|
||||||
|
self.thumbnail_data = {
|
||||||
|
'frame_time': self.obj_data['frame_time'],
|
||||||
|
'box': self.obj_data['box'],
|
||||||
|
'area': self.obj_data['area'],
|
||||||
|
'region': self.obj_data['region'],
|
||||||
|
'score': self.obj_data['score']
|
||||||
|
}
|
||||||
|
significant_update = True
|
||||||
|
|
||||||
|
# check zones
|
||||||
|
current_zones = []
|
||||||
|
bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
|
||||||
|
# check each zone
|
||||||
|
for name, zone in self.camera_config.zones.items():
|
||||||
|
contour = zone.contour
|
||||||
|
# check if the object is in the zone
|
||||||
|
if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
|
||||||
|
# if the object passed the filters once, dont apply again
|
||||||
|
if name in self.current_zones or not zone_filtered(self, zone.filters):
|
||||||
|
current_zones.append(name)
|
||||||
|
self.entered_zones.add(name)
|
||||||
|
|
||||||
|
# if the zones changed, signal an update
|
||||||
|
if not self.false_positive and set(self.current_zones) != set(current_zones):
|
||||||
|
significant_update = True
|
||||||
|
|
||||||
|
self.current_zones = current_zones
|
||||||
|
return significant_update
|
||||||
|
|
||||||
|
def to_dict(self, include_thumbnail: bool = False):
|
||||||
|
return {
|
||||||
|
'id': self.obj_data['id'],
|
||||||
|
'camera': self.camera,
|
||||||
|
'frame_time': self.obj_data['frame_time'],
|
||||||
|
'label': self.obj_data['label'],
|
||||||
|
'top_score': self.top_score,
|
||||||
|
'false_positive': self.false_positive,
|
||||||
|
'start_time': self.obj_data['start_time'],
|
||||||
|
'end_time': self.obj_data.get('end_time', None),
|
||||||
|
'score': self.obj_data['score'],
|
||||||
|
'box': self.obj_data['box'],
|
||||||
|
'area': self.obj_data['area'],
|
||||||
|
'region': self.obj_data['region'],
|
||||||
|
'current_zones': self.current_zones.copy(),
|
||||||
|
'entered_zones': list(self.entered_zones).copy(),
|
||||||
|
'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_thumbnail(self):
|
||||||
|
if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
|
||||||
|
ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
|
||||||
|
|
||||||
|
jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
|
||||||
|
|
||||||
|
if jpg_bytes:
|
||||||
|
return jpg_bytes
|
||||||
|
else:
|
||||||
|
ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
|
||||||
|
return jpg.tobytes()
|
||||||
|
|
||||||
|
def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
|
||||||
|
if self.thumbnail_data is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
|
||||||
|
|
||||||
|
if bounding_box:
|
||||||
|
thickness = 2
|
||||||
|
color = COLOR_MAP[self.obj_data['label']]
|
||||||
|
|
||||||
|
# draw the bounding boxes on the frame
|
||||||
|
box = self.thumbnail_data['box']
|
||||||
|
draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
|
||||||
|
|
||||||
|
if crop:
|
||||||
|
box = self.thumbnail_data['box']
|
||||||
|
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
|
||||||
|
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
|
||||||
|
|
||||||
|
if height:
|
||||||
|
width = int(height*best_frame.shape[1]/best_frame.shape[0])
|
||||||
|
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||||
|
|
||||||
|
if timestamp:
|
||||||
|
time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
|
||||||
|
size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
|
||||||
|
text_width = size[0][0]
|
||||||
|
desired_size = max(150, 0.33*best_frame.shape[1])
|
||||||
|
font_scale = desired_size/text_width
|
||||||
|
cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
|
||||||
|
fontScale=font_scale, color=(255, 255, 255), thickness=2)
|
||||||
|
|
||||||
|
ret, jpg = cv2.imencode('.jpg', best_frame)
|
||||||
|
if ret:
|
||||||
|
return jpg.tobytes()
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def zone_filtered(obj: TrackedObject, object_config):
|
||||||
|
object_name = obj.obj_data['label']
|
||||||
|
|
||||||
|
if object_name in object_config:
|
||||||
|
obj_settings = object_config[object_name]
|
||||||
|
|
||||||
|
# if the min area is larger than the
|
||||||
|
# detected object, don't add it to detected objects
|
||||||
|
if obj_settings.min_area > obj.obj_data['area']:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# if the detected object is larger than the
|
||||||
|
# max area, don't add it to detected objects
|
||||||
|
if obj_settings.max_area < obj.obj_data['area']:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# if the score is lower than the threshold, skip
|
||||||
|
if obj_settings.threshold > obj.computed_score:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Maintains the state of a camera
|
||||||
|
class CameraState():
|
||||||
|
def __init__(self, name, config, frame_manager):
|
||||||
|
self.name = name
|
||||||
|
self.config = config
|
||||||
|
self.camera_config = config.cameras[name]
|
||||||
|
self.frame_manager = frame_manager
|
||||||
|
self.best_objects: Dict[str, TrackedObject] = {}
|
||||||
|
self.object_counts = defaultdict(lambda: 0)
|
||||||
|
self.tracked_objects: Dict[str, TrackedObject] = {}
|
||||||
|
self.frame_cache = {}
|
||||||
|
self.zone_objects = defaultdict(lambda: [])
|
||||||
|
self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
|
||||||
|
self.current_frame_lock = threading.Lock()
|
||||||
|
self.current_frame_time = 0.0
|
||||||
|
self.motion_boxes = []
|
||||||
|
self.regions = []
|
||||||
|
self.previous_frame_id = None
|
||||||
|
self.callbacks = defaultdict(lambda: [])
|
||||||
|
|
||||||
|
def get_current_frame(self, draw_options={}):
|
||||||
|
with self.current_frame_lock:
|
||||||
|
frame_copy = np.copy(self._current_frame)
|
||||||
|
frame_time = self.current_frame_time
|
||||||
|
tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
|
||||||
|
motion_boxes = self.motion_boxes.copy()
|
||||||
|
regions = self.regions.copy()
|
||||||
|
|
||||||
|
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
|
||||||
|
# draw on the frame
|
||||||
|
if draw_options.get('bounding_boxes'):
|
||||||
|
# draw the bounding boxes on the frame
|
||||||
|
for obj in tracked_objects.values():
|
||||||
|
thickness = 2
|
||||||
|
color = COLOR_MAP[obj['label']]
|
||||||
|
|
||||||
|
if obj['frame_time'] != frame_time:
|
||||||
|
thickness = 1
|
||||||
|
color = (255,0,0)
|
||||||
|
|
||||||
|
# draw the bounding boxes on the frame
|
||||||
|
box = obj['box']
|
||||||
|
draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
|
||||||
|
|
||||||
|
if draw_options.get('regions'):
|
||||||
|
for region in regions:
|
||||||
|
cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
|
||||||
|
|
||||||
|
if draw_options.get('zones'):
|
||||||
|
for name, zone in self.camera_config.zones.items():
|
||||||
|
thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
|
||||||
|
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
|
||||||
|
|
||||||
|
if draw_options.get('mask'):
|
||||||
|
mask_overlay = np.where(self.camera_config.motion.mask==[0])
|
||||||
|
frame_copy[mask_overlay] = [0,0,0]
|
||||||
|
|
||||||
|
if draw_options.get('motion_boxes'):
|
||||||
|
for m_box in motion_boxes:
|
||||||
|
cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
|
||||||
|
|
||||||
|
if draw_options.get('timestamp'):
|
||||||
|
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
|
||||||
|
cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
|
||||||
|
|
||||||
|
return frame_copy
|
||||||
|
|
||||||
|
def finished(self, obj_id):
|
||||||
|
del self.tracked_objects[obj_id]
|
||||||
|
|
||||||
|
def on(self, event_type: str, callback: Callable[[Dict], None]):
|
||||||
|
self.callbacks[event_type].append(callback)
|
||||||
|
|
||||||
|
def update(self, frame_time, current_detections, motion_boxes, regions):
|
||||||
|
self.current_frame_time = frame_time
|
||||||
|
self.motion_boxes = motion_boxes
|
||||||
|
self.regions = regions
|
||||||
|
# get the new frame
|
||||||
|
frame_id = f"{self.name}{frame_time}"
|
||||||
|
current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
|
||||||
|
|
||||||
|
current_ids = current_detections.keys()
|
||||||
|
previous_ids = self.tracked_objects.keys()
|
||||||
|
removed_ids = list(set(previous_ids).difference(current_ids))
|
||||||
|
new_ids = list(set(current_ids).difference(previous_ids))
|
||||||
|
updated_ids = list(set(current_ids).intersection(previous_ids))
|
||||||
|
|
||||||
|
for id in new_ids:
|
||||||
|
new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])
|
||||||
|
|
||||||
|
# call event handlers
|
||||||
|
for c in self.callbacks['start']:
|
||||||
|
c(self.name, new_obj, frame_time)
|
||||||
|
|
||||||
|
for id in updated_ids:
|
||||||
|
updated_obj = self.tracked_objects[id]
|
||||||
|
significant_update = updated_obj.update(frame_time, current_detections[id])
|
||||||
|
|
||||||
|
if significant_update:
|
||||||
|
# ensure this frame is stored in the cache
|
||||||
|
if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
|
||||||
|
self.frame_cache[frame_time] = np.copy(current_frame)
|
||||||
|
|
||||||
|
# call event handlers
|
||||||
|
for c in self.callbacks['update']:
|
||||||
|
c(self.name, updated_obj, frame_time)
|
||||||
|
|
||||||
|
for id in removed_ids:
|
||||||
|
# publish events to mqtt
|
||||||
|
removed_obj = self.tracked_objects[id]
|
||||||
|
if not 'end_time' in removed_obj.obj_data:
|
||||||
|
removed_obj.obj_data['end_time'] = frame_time
|
||||||
|
for c in self.callbacks['end']:
|
||||||
|
c(self.name, removed_obj, frame_time)
|
||||||
|
|
||||||
|
# TODO: can i switch to looking this up and only changing when an event ends?
|
||||||
|
# maintain best objects
|
||||||
|
for obj in self.tracked_objects.values():
|
||||||
|
object_type = obj.obj_data['label']
|
||||||
|
# if the object's thumbnail is not from the current frame
|
||||||
|
if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
|
||||||
|
continue
|
||||||
|
if object_type in self.best_objects:
|
||||||
|
current_best = self.best_objects[object_type]
|
||||||
|
now = datetime.datetime.now().timestamp()
|
||||||
|
# if the object is a higher score than the current best score
|
||||||
|
# or the current object is older than desired, use the new object
|
||||||
|
if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
|
||||||
|
or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
|
||||||
|
self.best_objects[object_type] = obj
|
||||||
|
for c in self.callbacks['snapshot']:
|
||||||
|
c(self.name, self.best_objects[object_type], frame_time)
|
||||||
|
else:
|
||||||
|
self.best_objects[object_type] = obj
|
||||||
|
for c in self.callbacks['snapshot']:
|
||||||
|
c(self.name, self.best_objects[object_type], frame_time)
|
||||||
|
|
||||||
|
# update overall camera state for each object type
|
||||||
|
obj_counter = Counter()
|
||||||
|
for obj in self.tracked_objects.values():
|
||||||
|
if not obj.false_positive:
|
||||||
|
obj_counter[obj.obj_data['label']] += 1
|
||||||
|
|
||||||
|
# report on detected objects
|
||||||
|
for obj_name, count in obj_counter.items():
|
||||||
|
if count != self.object_counts[obj_name]:
|
||||||
|
self.object_counts[obj_name] = count
|
||||||
|
for c in self.callbacks['object_status']:
|
||||||
|
c(self.name, obj_name, count)
|
||||||
|
|
||||||
|
# expire any objects that are >0 and no longer detected
|
||||||
|
expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and not obj_name in obj_counter]
|
||||||
|
for obj_name in expired_objects:
|
||||||
|
self.object_counts[obj_name] = 0
|
||||||
|
for c in self.callbacks['object_status']:
|
||||||
|
c(self.name, obj_name, 0)
|
||||||
|
for c in self.callbacks['snapshot']:
|
||||||
|
c(self.name, self.best_objects[obj_name], frame_time)
|
||||||
|
|
||||||
|
# cleanup thumbnail frame cache
|
||||||
|
current_thumb_frames = set([obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive])
|
||||||
|
current_best_frames = set([obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()])
|
||||||
|
thumb_frames_to_delete = [t for t in self.frame_cache.keys() if not t in current_thumb_frames and not t in current_best_frames]
|
||||||
|
for t in thumb_frames_to_delete:
|
||||||
|
del self.frame_cache[t]
|
||||||
|
|
||||||
|
with self.current_frame_lock:
|
||||||
|
self._current_frame = current_frame
|
||||||
|
if not self.previous_frame_id is None:
|
||||||
|
self.frame_manager.delete(self.previous_frame_id)
|
||||||
|
self.previous_frame_id = frame_id
|
||||||
|
|
||||||
|
class TrackedObjectProcessor(threading.Thread):
|
||||||
|
def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
|
||||||
|
threading.Thread.__init__(self)
|
||||||
|
self.name = "detected_frames_processor"
|
||||||
|
self.config = config
|
||||||
|
self.client = client
|
||||||
|
self.topic_prefix = topic_prefix
|
||||||
|
self.tracked_objects_queue = tracked_objects_queue
|
||||||
|
self.event_queue = event_queue
|
||||||
|
self.event_processed_queue = event_processed_queue
|
||||||
|
self.stop_event = stop_event
|
||||||
|
self.camera_states: Dict[str, CameraState] = {}
|
||||||
|
self.frame_manager = SharedMemoryFrameManager()
|
||||||
|
|
||||||
|
def start(camera, obj: TrackedObject, current_frame_time):
|
||||||
|
self.event_queue.put(('start', camera, obj.to_dict()))
|
||||||
|
|
||||||
|
def update(camera, obj: TrackedObject, current_frame_time):
|
||||||
|
after = obj.to_dict()
|
||||||
|
message = { 'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update' }
|
||||||
|
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
|
||||||
|
obj.previous = after
|
||||||
|
|
||||||
|
def end(camera, obj: TrackedObject, current_frame_time):
|
||||||
|
snapshot_config = self.config.cameras[camera].snapshots
|
||||||
|
event_data = obj.to_dict(include_thumbnail=True)
|
||||||
|
event_data['has_snapshot'] = False
|
||||||
|
if not obj.false_positive:
|
||||||
|
message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
|
||||||
|
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
|
||||||
|
# write snapshot to disk if enabled
|
||||||
|
if snapshot_config.enabled:
|
||||||
|
jpg_bytes = obj.get_jpg_bytes(
|
||||||
|
timestamp=snapshot_config.timestamp,
|
||||||
|
bounding_box=snapshot_config.bounding_box,
|
||||||
|
crop=snapshot_config.crop,
|
||||||
|
height=snapshot_config.height
|
||||||
|
)
|
||||||
|
with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
|
||||||
|
j.write(jpg_bytes)
|
||||||
|
event_data['has_snapshot'] = True
|
||||||
|
self.event_queue.put(('end', camera, event_data))
|
||||||
|
|
||||||
|
def snapshot(camera, obj: TrackedObject, current_frame_time):
|
||||||
|
mqtt_config = self.config.cameras[camera].mqtt
|
||||||
|
if mqtt_config.enabled:
|
||||||
|
jpg_bytes = obj.get_jpg_bytes(
|
||||||
|
timestamp=mqtt_config.timestamp,
|
||||||
|
bounding_box=mqtt_config.bounding_box,
|
||||||
|
crop=mqtt_config.crop,
|
||||||
|
height=mqtt_config.height
|
||||||
|
)
|
||||||
|
self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
|
||||||
|
|
||||||
|
def object_status(camera, object_name, status):
|
||||||
|
self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
|
||||||
|
|
||||||
|
for camera in self.config.cameras.keys():
|
||||||
|
camera_state = CameraState(camera, self.config, self.frame_manager)
|
||||||
|
camera_state.on('start', start)
|
||||||
|
camera_state.on('update', update)
|
||||||
|
camera_state.on('end', end)
|
||||||
|
camera_state.on('snapshot', snapshot)
|
||||||
|
camera_state.on('object_status', object_status)
|
||||||
|
self.camera_states[camera] = camera_state
|
||||||
|
|
||||||
|
# {
|
||||||
|
# 'zone_name': {
|
||||||
|
# 'person': {
|
||||||
|
# 'camera_1': 2,
|
||||||
|
# 'camera_2': 1
|
||||||
|
# }
|
||||||
|
# }
|
||||||
|
# }
|
||||||
|
self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))
|
||||||
|
|
||||||
|
def get_best(self, camera, label):
|
||||||
|
# TODO: need a lock here
|
||||||
|
camera_state = self.camera_states[camera]
|
||||||
|
if label in camera_state.best_objects:
|
||||||
|
best_obj = camera_state.best_objects[label]
|
||||||
|
best = best_obj.thumbnail_data.copy()
|
||||||
|
best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
|
||||||
|
return best
|
||||||
|
else:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def get_current_frame(self, camera, draw_options={}):
|
||||||
|
return self.camera_states[camera].get_current_frame(draw_options)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
while True:
|
||||||
|
if self.stop_event.is_set():
|
||||||
|
logger.info(f"Exiting object processor...")
|
||||||
|
break
|
||||||
|
|
||||||
|
try:
|
||||||
|
camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
|
||||||
|
except queue.Empty:
|
||||||
|
continue
|
||||||
|
|
||||||
|
camera_state = self.camera_states[camera]
|
||||||
|
|
||||||
|
camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
|
||||||
|
|
||||||
|
# update zone counts for each label
|
||||||
|
# for each zone in the current camera
|
||||||
|
for zone in self.config.cameras[camera].zones.keys():
|
||||||
|
# count labels for the camera in the zone
|
||||||
|
obj_counter = Counter()
|
||||||
|
for obj in camera_state.tracked_objects.values():
|
||||||
|
if zone in obj.current_zones and not obj.false_positive:
|
||||||
|
obj_counter[obj.obj_data['label']] += 1
|
||||||
|
|
||||||
|
# update counts and publish status
|
||||||
|
for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
|
||||||
|
# if we have previously published a count for this zone/label
|
||||||
|
zone_label = self.zone_data[zone][label]
|
||||||
|
if camera in zone_label:
|
||||||
|
current_count = sum(zone_label.values())
|
||||||
|
zone_label[camera] = obj_counter[label] if label in obj_counter else 0
|
||||||
|
new_count = sum(zone_label.values())
|
||||||
|
if new_count != current_count:
|
||||||
|
self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
|
||||||
|
# if this is a new zone/label combo for this camera
|
||||||
|
else:
|
||||||
|
if label in obj_counter:
|
||||||
|
zone_label[camera] = obj_counter[label]
|
||||||
|
self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)
|
||||||
|
|
||||||
|
# cleanup event finished queue
|
||||||
|
while not self.event_processed_queue.empty():
|
||||||
|
event_id, camera = self.event_processed_queue.get()
|
||||||
|
self.camera_states[camera].finished(event_id)
|
||||||
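A condensed, standalone version of the zone-count bookkeeping in `run()` above: counts are kept per camera for each zone/label pair, and the published value is the sum across cameras, re-published only when it changes. The data and topic prefix are made up, and the first-time-seen branch is folded into the general case.

```python
from collections import defaultdict

zone_data = defaultdict(lambda: defaultdict(dict))
published = []

def update_zone(zone, label, camera, count):
    zone_label = zone_data[zone][label]
    current_count = sum(zone_label.values())
    zone_label[camera] = count
    new_count = sum(zone_label.values())
    if new_count != current_count:
        published.append((f"frigate/{zone}/{label}", new_count))

update_zone('driveway', 'person', 'front', 1)   # publishes 1
update_zone('driveway', 'person', 'side', 1)    # publishes 2
update_zone('driveway', 'person', 'front', 1)   # unchanged, nothing published
update_zone('driveway', 'person', 'side', 0)    # publishes 1
print(published)
# [('frigate/driveway/person', 1), ('frigate/driveway/person', 2), ('frigate/driveway/person', 1)]
```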
@@ -1,88 +1,149 @@
|
|||||||
import time
|
import copy
|
||||||
import datetime
|
import datetime
|
||||||
|
import itertools
|
||||||
|
import multiprocessing as mp
|
||||||
|
import random
|
||||||
|
import string
|
||||||
import threading
|
import threading
|
||||||
|
import time
|
||||||
|
from collections import defaultdict
|
||||||
|
|
||||||
import cv2
|
import cv2
|
||||||
from . util import draw_box_with_label
|
import numpy as np
|
||||||
|
from scipy.spatial import distance as dist
|
||||||
|
|
||||||
class ObjectCleaner(threading.Thread):
|
from frigate.config import DetectConfig
|
||||||
def __init__(self, objects_parsed, detected_objects):
|
from frigate.util import draw_box_with_label
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self._objects_parsed = objects_parsed
|
|
||||||
self._detected_objects = detected_objects
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
while True:
|
|
||||||
|
|
||||||
# wait a bit before checking for expired frames
|
|
||||||
time.sleep(0.2)
|
|
||||||
|
|
||||||
# expire the objects that are more than 1 second old
|
|
||||||
now = datetime.datetime.now().timestamp()
|
|
||||||
# look for the first object found within the last second
|
|
||||||
# (newest objects are appended to the end)
|
|
||||||
detected_objects = self._detected_objects.copy()
|
|
||||||
|
|
||||||
num_to_delete = 0
|
|
||||||
for obj in detected_objects:
|
|
||||||
if now-obj['frame_time']<2:
|
|
||||||
break
|
|
||||||
num_to_delete += 1
|
|
||||||
if num_to_delete > 0:
|
|
||||||
del self._detected_objects[:num_to_delete]
|
|
||||||
|
|
||||||
# notify that parsed objects were changed
|
|
||||||
with self._objects_parsed:
|
|
||||||
self._objects_parsed.notify_all()
|
|
||||||
|
|
||||||
|
|
||||||
# Maintains the frame and person with the highest score from the most recent
|
class ObjectTracker():
|
||||||
# motion event
|
def __init__(self, config: DetectConfig):
|
||||||
class BestPersonFrame(threading.Thread):
|
self.tracked_objects = {}
|
||||||
def __init__(self, objects_parsed, recent_frames, detected_objects):
|
self.disappeared = {}
|
||||||
threading.Thread.__init__(self)
|
self.max_disappeared = config.max_disappeared
|
||||||
self.objects_parsed = objects_parsed
|
|
||||||
self.recent_frames = recent_frames
|
|
||||||
self.detected_objects = detected_objects
|
|
||||||
self.best_person = None
|
|
||||||
self.best_frame = None
|
|
||||||
|
|
||||||
def run(self):
|
def register(self, index, obj):
|
||||||
while True:
|
rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||||
|
id = f"{obj['frame_time']}-{rand_id}"
|
||||||
|
obj['id'] = id
|
||||||
|
obj['start_time'] = obj['frame_time']
|
||||||
|
self.tracked_objects[id] = obj
|
||||||
|
self.disappeared[id] = 0
|
||||||
|
|
||||||
# wait until objects have been parsed
|
def deregister(self, id):
|
||||||
with self.objects_parsed:
|
del self.tracked_objects[id]
|
||||||
self.objects_parsed.wait()
|
del self.disappeared[id]
|
||||||
|
|
||||||
|
def update(self, id, new_obj):
|
||||||
|
self.disappeared[id] = 0
|
||||||
|
self.tracked_objects[id].update(new_obj)
|
||||||
|
|
||||||
# make a copy of detected objects
|
def match_and_update(self, frame_time, new_objects):
|
||||||
detected_objects = self.detected_objects.copy()
|
# group by name
|
||||||
detected_people = [obj for obj in detected_objects if obj['name'] == 'person']
|
new_object_groups = defaultdict(lambda: [])
|
||||||
|
for obj in new_objects:
|
||||||
|
new_object_groups[obj[0]].append({
|
||||||
|
'label': obj[0],
|
||||||
|
'score': obj[1],
|
||||||
|
'box': obj[2],
|
||||||
|
'area': obj[3],
|
||||||
|
'region': obj[4],
|
||||||
|
'frame_time': frame_time
|
||||||
|
})
|
||||||
|
|
||||||
|
# update any tracked objects with labels that are not
|
||||||
|
# seen in the current objects and deregister if needed
|
||||||
|
for obj in list(self.tracked_objects.values()):
|
||||||
|
if not obj['label'] in new_object_groups:
|
||||||
|
if self.disappeared[obj['id']] >= self.max_disappeared:
|
||||||
|
self.deregister(obj['id'])
|
||||||
|
else:
|
||||||
|
self.disappeared[obj['id']] += 1
|
||||||
|
|
||||||
|
if len(new_objects) == 0:
|
||||||
|
return
|
||||||
|
|
||||||
|
# track objects for each label type
|
||||||
|
for label, group in new_object_groups.items():
|
||||||
|
current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
|
||||||
|
current_ids = [o['id'] for o in current_objects]
|
||||||
|
current_centroids = np.array([o['centroid'] for o in current_objects])
|
||||||
|
|
||||||
# get the highest scoring person
|
# compute centroids of new objects
|
||||||
new_best_person = max(detected_people, key=lambda x:x['score'], default=self.best_person)
|
for obj in group:
|
||||||
|
centroid_x = int((obj['box'][0]+obj['box'][2]) / 2.0)
|
||||||
|
centroid_y = int((obj['box'][1]+obj['box'][3]) / 2.0)
|
||||||
|
obj['centroid'] = (centroid_x, centroid_y)
|
||||||
|
|
||||||
# if there isnt a person, continue
|
if len(current_objects) == 0:
|
||||||
if new_best_person is None:
|
for index, obj in enumerate(group):
|
||||||
continue
|
self.register(index, obj)
|
||||||
|
return
|
||||||
|
|
||||||
|
new_centroids = np.array([o['centroid'] for o in group])
|
||||||
|
|
||||||
# if there is no current best_person
|
# compute the distance between each pair of tracked
|
||||||
if self.best_person is None:
|
# centroids and new centroids, respectively -- our
|
||||||
self.best_person = new_best_person
|
# goal will be to match each new centroid to an existing
|
||||||
# if there is already a best_person
|
# object centroid
|
||||||
|
D = dist.cdist(current_centroids, new_centroids)
|
||||||
|
|
||||||
|
# in order to perform this matching we must (1) find the
|
||||||
|
# smallest value in each row and then (2) sort the row
|
||||||
|
# indexes based on their minimum values so that the row
|
||||||
|
# with the smallest value is at the *front* of the index
|
||||||
|
# list
|
||||||
|
rows = D.min(axis=1).argsort()
|
||||||
|
|
||||||
|
# next, we perform a similar process on the columns by
|
||||||
|
# finding the smallest value in each column and then
|
||||||
|
# sorting using the previously computed row index list
|
||||||
|
cols = D.argmin(axis=1)[rows]
|
||||||
|
|
||||||
|
# in order to determine if we need to update, register,
|
||||||
|
# or deregister an object we need to keep track of which
|
||||||
|
# of the rows and column indexes we have already examined
|
||||||
|
usedRows = set()
|
||||||
|
usedCols = set()
|
||||||
|
|
||||||
|
# loop over the combination of the (row, column) index
|
||||||
|
# tuples
|
||||||
|
for (row, col) in zip(rows, cols):
|
||||||
|
# if we have already examined either the row or
|
||||||
|
# column value before, ignore it
|
||||||
|
if row in usedRows or col in usedCols:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# otherwise, grab the object ID for the current row,
|
||||||
|
# set its new centroid, and reset the disappeared
|
||||||
|
# counter
|
||||||
|
objectID = current_ids[row]
|
||||||
|
self.update(objectID, group[col])
|
||||||
|
|
||||||
|
# indicate that we have examined each of the row and
|
||||||
|
# column indexes, respectively
|
||||||
|
usedRows.add(row)
|
||||||
|
usedCols.add(col)
|
||||||
|
|
||||||
|
# compute the column index we have NOT yet examined
|
||||||
|
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
|
||||||
|
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
|
||||||
|
|
||||||
|
# in the event that the number of object centroids is
|
||||||
|
# equal or greater than the number of input centroids
|
||||||
|
# we need to check and see if some of these objects have
|
||||||
|
# potentially disappeared
|
||||||
|
if D.shape[0] >= D.shape[1]:
|
||||||
|
for row in unusedRows:
|
||||||
|
id = current_ids[row]
|
||||||
|
|
||||||
|
if self.disappeared[id] >= self.max_disappeared:
|
||||||
|
self.deregister(id)
|
||||||
|
else:
|
||||||
|
self.disappeared[id] += 1
|
||||||
|
# if the number of input centroids is greater
|
||||||
|
# than the number of existing object centroids we need to
|
||||||
|
# register each new input centroid as a trackable object
|
||||||
else:
|
else:
|
||||||
now = datetime.datetime.now().timestamp()
|
for col in unusedCols:
|
||||||
# if the new best person is a higher score than the current best person
|
self.register(col, group[col])
|
||||||
# or the current person is more than 1 minute old, use the new best person
|
|
||||||
if new_best_person['score'] > self.best_person['score'] or (now - self.best_person['frame_time']) > 60:
|
|
||||||
self.best_person = new_best_person
|
|
||||||
|
|
||||||
# make a copy of the recent frames
|
|
||||||
recent_frames = self.recent_frames.copy()
|
|
||||||
|
|
||||||
if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
|
|
||||||
best_frame = recent_frames[self.best_person['frame_time']]
|
|
||||||
|
|
||||||
label = "{}: {}% {}".format(self.best_person['name'],int(self.best_person['score']*100),int(self.best_person['area']))
|
|
||||||
draw_box_with_label(best_frame, self.best_person['xmin'], self.best_person['ymin'],
|
|
||||||
self.best_person['xmax'], self.best_person['ymax'], label)
|
|
||||||
|
|
||||||
self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
|
|
||||||
|
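A stripped-down illustration of the greedy centroid matching used by `ObjectTracker.match_and_update` above: build the distance matrix between tracked and new centroids, then walk the smallest distances first, consuming each row and column at most once. The centroids are made up.

```python
import numpy as np
from scipy.spatial import distance as dist

current_centroids = np.array([(100, 200), (400, 420)])          # existing tracked objects
new_centroids = np.array([(410, 425), (105, 205), (700, 90)])   # detections in this frame

D = dist.cdist(current_centroids, new_centroids)

rows = D.min(axis=1).argsort()      # tracked objects, nearest match first
cols = D.argmin(axis=1)[rows]       # their closest new detection

used_rows, used_cols = set(), set()
matches = []

for row, col in zip(rows, cols):
    if row in used_rows or col in used_cols:
        continue
    matches.append((row, col))
    used_rows.add(row)
    used_cols.add(col)

unmatched_new = [c for c in range(D.shape[1]) if c not in used_cols]

print(matches)        # [(0, 1), (1, 0)] -> each tracked object paired with its nearest detection
print(unmatched_new)  # [2] -> (700, 90) would be registered as a new object
```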
|||||||
208
frigate/process_clip.py
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import multiprocessing as mp
|
||||||
|
import os
|
||||||
|
import subprocess as sp
|
||||||
|
import sys
|
||||||
|
from unittest import TestCase, main
|
||||||
|
|
||||||
|
import click
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
|
||||||
|
from frigate.edgetpu import LocalObjectDetector
|
||||||
|
from frigate.motion import MotionDetector
|
||||||
|
from frigate.object_processing import COLOR_MAP, CameraState
|
||||||
|
from frigate.objects import ObjectTracker
|
||||||
|
from frigate.util import (DictFrameManager, EventsPerSecond,
|
||||||
|
SharedMemoryFrameManager, draw_box_with_label)
|
||||||
|
from frigate.video import (capture_frames, process_frames,
|
||||||
|
start_or_restart_ffmpeg)
|
||||||
|
|
||||||
|
logging.basicConfig()
|
||||||
|
logging.root.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
def get_frame_shape(source):
|
||||||
|
ffprobe_cmd = " ".join([
|
||||||
|
'ffprobe',
|
||||||
|
'-v',
|
||||||
|
'panic',
|
||||||
|
'-show_error',
|
||||||
|
'-show_streams',
|
||||||
|
'-of',
|
||||||
|
'json',
|
||||||
|
'"'+source+'"'
|
||||||
|
])
|
||||||
|
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
|
||||||
|
(output, err) = p.communicate()
|
||||||
|
p_status = p.wait()
|
||||||
|
info = json.loads(output)
|
||||||
|
|
||||||
|
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
|
||||||
|
|
||||||
|
if video_info['height'] != 0 and video_info['width'] != 0:
|
||||||
|
return (video_info['height'], video_info['width'], 3)
|
||||||
|
|
||||||
|
# fallback to using opencv if ffprobe didnt succeed
|
||||||
|
video = cv2.VideoCapture(source)
|
||||||
|
ret, frame = video.read()
|
||||||
|
frame_shape = frame.shape
|
||||||
|
video.release()
|
||||||
|
return frame_shape
|
||||||
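`get_frame_shape()` above builds a shell string and quotes the source path by hand; below is a sketch of the same ffprobe call with an argument list, which avoids shell quoting entirely. ffprobe must be on PATH and the clip path is a placeholder.

```python
import json
import subprocess as sp

def probe_frame_shape(source):
    ffprobe_cmd = [
        'ffprobe', '-v', 'panic',
        '-show_error', '-show_streams',
        '-of', 'json',
        source,
    ]
    output = sp.check_output(ffprobe_cmd)
    info = json.loads(output)
    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
    return (int(video_info['height']), int(video_info['width']), 3)

print(probe_frame_shape('driveway_clip.mp4'))
```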
|
|
||||||
|
class ProcessClip():
|
||||||
|
def __init__(self, clip_path, frame_shape, config: FrigateConfig):
|
||||||
|
self.clip_path = clip_path
|
||||||
|
self.camera_name = 'camera'
|
||||||
|
self.config = config
|
||||||
|
self.camera_config = self.config.cameras['camera']
|
||||||
|
self.frame_shape = self.camera_config.frame_shape
|
||||||
|
self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
|
||||||
|
self.frame_manager = SharedMemoryFrameManager()
|
||||||
|
self.frame_queue = mp.Queue()
|
||||||
|
self.detected_objects_queue = mp.Queue()
|
||||||
|
self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
|
||||||
|
|
||||||
|
def load_frames(self):
|
||||||
|
fps = EventsPerSecond()
|
||||||
|
skipped_fps = EventsPerSecond()
|
||||||
|
current_frame = mp.Value('d', 0.0)
|
||||||
|
frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
|
||||||
|
ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
|
||||||
|
capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
|
||||||
|
self.frame_queue, fps, skipped_fps, current_frame)
|
||||||
|
ffmpeg_process.wait()
|
||||||
|
ffmpeg_process.communicate()
|
||||||
|
|
||||||
|
def process_frames(self, objects_to_track=['person'], object_filters={}):
|
||||||
|
mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
|
||||||
|
mask[:] = 255
|
||||||
|
motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
|
||||||
|
|
||||||
|
object_detector = LocalObjectDetector(labels='/labelmap.txt')
|
||||||
|
object_tracker = ObjectTracker(self.camera_config.detect)
|
||||||
|
process_info = {
|
||||||
|
'process_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_fps': mp.Value('d', 0.0),
|
||||||
|
'detection_frame': mp.Value('d', 0.0)
|
||||||
|
}
|
||||||
|
stop_event = mp.Event()
|
||||||
|
model_shape = (self.config.model.height, self.config.model.width)
|
||||||
|
|
||||||
|
process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
|
||||||
|
self.frame_manager, motion_detector, object_detector, object_tracker,
|
||||||
|
self.detected_objects_queue, process_info,
|
||||||
|
objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
|
||||||
|
|
||||||
|
def top_object(self, debug_path=None):
|
||||||
|
obj_detected = False
|
||||||
|
top_computed_score = 0.0
|
||||||
|
def handle_event(name, obj, frame_time):
|
||||||
|
nonlocal obj_detected
|
||||||
|
nonlocal top_computed_score
|
||||||
|
if obj.computed_score > top_computed_score:
|
||||||
|
top_computed_score = obj.computed_score
|
||||||
|
if not obj.false_positive:
|
||||||
|
obj_detected = True
|
||||||
|
self.camera_state.on('new', handle_event)
|
||||||
|
self.camera_state.on('update', handle_event)
|
||||||
|
|
||||||
|
while(not self.detected_objects_queue.empty()):
|
||||||
|
camera_name, frame_time, current_tracked_objects, motion_boxes, regions = self.detected_objects_queue.get()
|
||||||
|
if not debug_path is None:
|
||||||
|
self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
|
||||||
|
|
||||||
|
self.camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
|
||||||
|
|
||||||
|
self.frame_manager.delete(self.camera_state.previous_frame_id)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'object_detected': obj_detected,
|
||||||
|
'top_score': top_computed_score
|
||||||
|
}
|
||||||
|
|
||||||
|
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
|
||||||
|
current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
|
||||||
|
# draw the bounding boxes on the frame
|
||||||
|
for obj in tracked_objects:
|
||||||
|
thickness = 2
|
||||||
|
color = (0,0,175)
|
||||||
|
|
||||||
|
if obj['frame_time'] != frame_time:
|
||||||
|
thickness = 1
|
||||||
|
color = (255,0,0)
|
||||||
|
else:
|
||||||
|
color = (255,255,0)
|
||||||
|
|
||||||
|
# draw the bounding boxes on the frame
|
||||||
|
box = obj['box']
|
||||||
|
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
|
||||||
|
# draw the regions on the frame
|
||||||
|
region = obj['region']
|
||||||
|
draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
|
||||||
|
|
||||||
|
cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
|
||||||
|
@click.option("-l", "--label", default='person', help="Label name to detect.")
|
||||||
|
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
|
||||||
|
@click.option("-s", "--scores", default=None, help="File to save csv of top scores")
|
||||||
|
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
|
||||||
|
def process(path, label, threshold, scores, debug_path):
|
||||||
|
clips = []
|
||||||
|
if os.path.isdir(path):
|
||||||
|
files = os.listdir(path)
|
||||||
|
files.sort()
|
||||||
|
clips = [os.path.join(path, file) for file in files]
|
||||||
|
elif os.path.isfile(path):
|
||||||
|
clips.append(path)
|
||||||
|
|
||||||
|
json_config = {
|
||||||
|
'mqtt': {
|
||||||
|
'host': 'mqtt'
|
||||||
|
},
|
||||||
|
'cameras': {
|
||||||
|
'camera': {
|
||||||
|
'ffmpeg': {
|
||||||
|
'inputs': [
|
||||||
|
{ 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
|
||||||
|
]
|
||||||
|
},
|
||||||
|
'height': 1920,
|
||||||
|
'width': 1080
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
results = []
|
||||||
|
for c in clips:
|
||||||
|
logger.info(c)
|
||||||
|
frame_shape = get_frame_shape(c)
|
||||||
|
|
||||||
|
json_config['cameras']['camera']['height'] = frame_shape[0]
|
||||||
|
json_config['cameras']['camera']['width'] = frame_shape[1]
|
||||||
|
json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
|
||||||
|
|
||||||
|
config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
|
||||||
|
|
||||||
|
process_clip = ProcessClip(c, frame_shape, config)
|
||||||
|
process_clip.load_frames()
|
||||||
|
process_clip.process_frames(objects_to_track=[label])
|
||||||
|
|
||||||
|
results.append((c, process_clip.top_object(debug_path)))
|
||||||
|
|
||||||
|
if not scores is None:
|
||||||
|
with open(scores, 'w') as writer:
|
||||||
|
for result in results:
|
||||||
|
writer.write(f"{result[0]},{result[1]['top_score']}\n")
|
||||||
|
|
||||||
|
positive_count = sum(1 for result in results if result[1]['object_detected'])
|
||||||
|
print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
process()
|
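The benchmark above is a click CLI; assuming it lives in the repository's process_clip.py (the script name is not shown in this hunk) and is run inside the Frigate container where ffmpeg, a detector, and /labelmap.txt are available, an invocation against a directory of clips looks roughly like:

    python3 process_clip.py -p /media/frigate/clips -l person -s /media/frigate/scores.csv --debug-path /media/frigate/debug

The paths here are illustrative only; the script prints the detection rate across the clips and, when -s is given, writes one "clip,top_score" line per clip.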
frigate/record.py (new file, 125 lines)
@@ -0,0 +1,125 @@
import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path

import psutil

from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR

logger = logging.getLogger(__name__)

SECONDS_IN_DAY = 60 * 60 * 24

def remove_empty_directories(directory):
    # list all directories recursively and sort them by path,
    # longest first
    paths = sorted(
        [x[0] for x in os.walk(RECORD_DIR)],
        key=lambda p: len(str(p)),
        reverse=True,
    )
    for path in paths:
        # don't delete the parent
        if path == RECORD_DIR:
            continue
        if len(os.listdir(path)) == 0:
            os.rmdir(path)

class RecordingMaintainer(threading.Thread):
    def __init__(self, config: FrigateConfig, stop_event):
        threading.Thread.__init__(self)
        self.name = 'recording_maint'
        self.config = config
        self.stop_event = stop_event

    def move_files(self):
        recordings = [d for d in os.listdir(RECORD_DIR) if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")]

        files_in_use = []
        for process in psutil.process_iter():
            try:
                if process.name() != 'ffmpeg':
                    continue
                flist = process.open_files()
                if flist:
                    for nt in flist:
                        if nt.path.startswith(RECORD_DIR):
                            files_in_use.append(nt.path.split('/')[-1])
            except:
                continue

        for f in recordings:
            if f in files_in_use:
                continue

            camera = '-'.join(f.split('-')[:-1])
            start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')

            ffprobe_cmd = " ".join([
                'ffprobe', '-v', 'error',
                '-show_entries', 'format=duration',
                '-of', 'default=noprint_wrappers=1:nokey=1',
                f"{os.path.join(RECORD_DIR,f)}"
            ])
            p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
            (output, err) = p.communicate()
            p_status = p.wait()
            if p_status == 0:
                duration = float(output.decode('utf-8').strip())
            else:
                logger.info(f"bad file: {f}")
                os.remove(os.path.join(RECORD_DIR,f))
                continue

            directory = os.path.join(RECORD_DIR, start_time.strftime('%Y-%m/%d/%H'), camera)

            if not os.path.exists(directory):
                os.makedirs(directory)

            file_name = f"{start_time.strftime('%M.%S.mp4')}"

            os.rename(os.path.join(RECORD_DIR,f), os.path.join(directory,file_name))

    def expire_files(self):
        delete_before = {}
        for name, camera in self.config.cameras.items():
            delete_before[name] = datetime.datetime.now().timestamp() - SECONDS_IN_DAY*camera.record.retain_days

        for p in Path('/media/frigate/recordings').rglob("*.mp4"):
            if not p.parent.name in delete_before:
                continue
            if p.stat().st_mtime < delete_before[p.parent.name]:
                p.unlink(missing_ok=True)

    def run(self):
        counter = 0
        self.expire_files()
        while(True):
            if self.stop_event.is_set():
                logger.info(f"Exiting recording maintenance...")
                break

            # only expire events every 10 minutes, but check for new files every 10 seconds
            time.sleep(10)
            counter = counter + 1
            if counter > 60:
                self.expire_files()
                remove_empty_directories(RECORD_DIR)
                counter = 0

            self.move_files()
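As a rough usage sketch (not part of the diff itself): RecordingMaintainer is a plain daemon-style thread, so wiring it up only requires a FrigateConfig and a shared stop event. The minimal config and the use of a multiprocessing Event below are assumptions for illustration.

    import multiprocessing as mp

    from frigate.config import FrigateConfig
    from frigate.record import RecordingMaintainer

    # assumed minimal config; a real config defines cameras and their record settings
    config = FrigateConfig(config={'mqtt': {'host': 'mqtt'}, 'cameras': {}})

    stop_event = mp.Event()
    maintainer = RecordingMaintainer(config, stop_event)
    maintainer.start()   # moves finished .mp4 segments and expires old ones on its 10 second loop

    # ... later, on shutdown:
    stop_event.set()
    maintainer.join()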
frigate/stats.py (new file, 70 lines)
@@ -0,0 +1,70 @@
import json
import logging
import threading
import time

from frigate.config import FrigateConfig
from frigate.version import VERSION

logger = logging.getLogger(__name__)

def stats_init(camera_metrics, detectors):
    stats_tracking = {
        'camera_metrics': camera_metrics,
        'detectors': detectors,
        'started': int(time.time())
    }
    return stats_tracking

def stats_snapshot(stats_tracking):
    camera_metrics = stats_tracking['camera_metrics']
    stats = {}

    total_detection_fps = 0

    for name, camera_stats in camera_metrics.items():
        total_detection_fps += camera_stats['detection_fps'].value
        stats[name] = {
            'camera_fps': round(camera_stats['camera_fps'].value, 2),
            'process_fps': round(camera_stats['process_fps'].value, 2),
            'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
            'detection_fps': round(camera_stats['detection_fps'].value, 2),
            'pid': camera_stats['process'].pid,
            'capture_pid': camera_stats['capture_process'].pid
        }

    stats['detectors'] = {}
    for name, detector in stats_tracking["detectors"].items():
        stats['detectors'][name] = {
            'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
            'detection_start': detector.detection_start.value,
            'pid': detector.detect_process.pid
        }
    stats['detection_fps'] = round(total_detection_fps, 2)

    stats['service'] = {
        'uptime': (int(time.time()) - stats_tracking['started']),
        'version': VERSION
    }

    return stats

class StatsEmitter(threading.Thread):
    def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_prefix, stop_event):
        threading.Thread.__init__(self)
        self.name = 'frigate_stats_emitter'
        self.config = config
        self.stats_tracking = stats_tracking
        self.mqtt_client = mqtt_client
        self.topic_prefix = topic_prefix
        self.stop_event = stop_event

    def run(self):
        time.sleep(10)
        while True:
            if self.stop_event.is_set():
                logger.info(f"Exiting watchdog...")
                break
            stats = stats_snapshot(self.stats_tracking)
            self.mqtt_client.publish(f"{self.topic_prefix}/stats", json.dumps(stats), retain=False)
            time.sleep(self.config.mqtt.stats_interval)
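For reference, a payload published to <topic_prefix>/stats by the emitter above has roughly the following shape. Only the keys come from stats_snapshot(); the camera name, detector name, and all values are made-up examples.

    example_stats = {
        'back': {                      # one entry per configured camera
            'camera_fps': 5.0,
            'process_fps': 5.0,
            'skipped_fps': 0.0,
            'detection_fps': 1.5,
            'pid': 34,
            'capture_pid': 35,
        },
        'detectors': {
            'coral': {                 # one entry per configured detector
                'inference_speed': 10.12,   # milliseconds
                'detection_start': 0.0,
                'pid': 25,
            }
        },
        'detection_fps': 1.5,          # sum across cameras
        'service': {'uptime': 3600, 'version': '0.8.0'},
    }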
frigate/test/__init__.py (new empty file)

frigate/test/test_config.py (new file, 342 lines)
@@ -0,0 +1,342 @@
import json
from unittest import TestCase, main
import voluptuous as vol
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig

class TestConfig(TestCase):
    def setUp(self):
        self.minimal = {
            'mqtt': {'host': 'mqtt'},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }

    def test_empty(self):
        FRIGATE_CONFIG_SCHEMA({})

    def test_minimal(self):
        FRIGATE_CONFIG_SCHEMA(self.minimal)

    def test_config_class(self):
        FrigateConfig(config=self.minimal)

    def test_inherit_tracked_objects(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'objects': {'track': ['person', 'dog']},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('dog' in frigate_config.cameras['back'].objects.track)

    def test_override_tracked_objects(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'objects': {'track': ['person', 'dog']},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920,
                    'objects': {'track': ['cat']}
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('cat' in frigate_config.cameras['back'].objects.track)

    def test_default_object_filters(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'objects': {'track': ['person', 'dog']},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('dog' in frigate_config.cameras['back'].objects.filters)

    def test_inherit_object_filters(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'objects': {
                'track': ['person', 'dog'],
                'filters': {'dog': {'threshold': 0.7}}
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('dog' in frigate_config.cameras['back'].objects.filters)
        assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)

    def test_override_object_filters(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920,
                    'objects': {
                        'track': ['person', 'dog'],
                        'filters': {'dog': {'threshold': 0.7}}
                    }
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('dog' in frigate_config.cameras['back'].objects.filters)
        assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)

    def test_ffmpeg_params(self):
        config = {
            'ffmpeg': {'input_args': ['-re']},
            'mqtt': {'host': 'mqtt'},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920,
                    'objects': {
                        'track': ['person', 'dog'],
                        'filters': {'dog': {'threshold': 0.7}}
                    }
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])

    def test_inherit_clips_retention(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'clips': {
                'retain': {'default': 20, 'objects': {'person': 30}}
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        frigate_config = FrigateConfig(config=config)
        assert(frigate_config.cameras['back'].clips.retain.objects['person'] == 30)

    def test_roles_listed_twice_throws_error(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'clips': {
                'retain': {'default': 20, 'objects': {'person': 30}}
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] },
                            { 'path': 'rtsp://10.0.0.1:554/video2', 'roles': ['detect'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_zone_matching_camera_name_throws_error(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'clips': {
                'retain': {'default': 20, 'objects': {'person': 30}}
            },
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920,
                    'zones': {
                        'back': {'coordinates': '1,1,1,1,1,1'}
                    }
                }
            }
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_clips_should_default_to_global_objects(self):
        config = {
            'mqtt': {'host': 'mqtt'},
            'clips': {
                'retain': {'default': 20, 'objects': {'person': 30}}
            },
            'objects': {'track': ['person', 'dog']},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }]
                    },
                    'height': 1080,
                    'width': 1920,
                    'clips': {'enabled': True}
                }
            }
        }
        config = FrigateConfig(config=config)
        assert(config.cameras['back'].clips.objects is None)

    def test_role_assigned_but_not_enabled(self):
        json_config = {
            'mqtt': {'host': 'mqtt'},
            'cameras': {
                'back': {
                    'ffmpeg': {
                        'inputs': [
                            { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect', 'rtmp'] },
                            { 'path': 'rtsp://10.0.0.1:554/record', 'roles': ['record'] }
                        ]
                    },
                    'height': 1080,
                    'width': 1920
                }
            }
        }

        config = FrigateConfig(config=json_config)
        ffmpeg_cmds = config.cameras['back'].ffmpeg_cmds
        assert(len(ffmpeg_cmds) == 1)
        assert(not 'clips' in ffmpeg_cmds[0]['roles'])

if __name__ == '__main__':
    main(verbosity=2)
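These are plain unittest cases, so with the frigate package importable (for example inside the project's dev container) the module's own __main__ hook can be used directly:

    python3 -m frigate.test.test_config

Running python3 -m unittest frigate.test.test_config -v gives the same result through the standard unittest runner.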
frigate/test/test_yuv_region_2_rgb.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import cv2
import numpy as np
from unittest import TestCase, main
from frigate.util import yuv_region_2_rgb

class TestYuvRegion2RGB(TestCase):
    def setUp(self):
        self.bgr_frame = np.zeros((100, 200, 3), np.uint8)
        self.bgr_frame[:] = (0, 0, 255)
        self.bgr_frame[5:55, 5:55] = (255,0,0)
        # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
        self.yuv_frame = cv2.cvtColor(self.bgr_frame, cv2.COLOR_BGR2YUV_I420)

    def test_crop_yuv(self):
        cropped = yuv_region_2_rgb(self.yuv_frame, (10,10,50,50))
        # ensure the upper left pixel is blue
        assert(np.all(cropped[0, 0] == [0, 0, 255]))

    def test_crop_yuv_out_of_bounds(self):
        cropped = yuv_region_2_rgb(self.yuv_frame, (0,0,200,200))
        # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
        # ensure the upper left pixel is red
        # the yuv conversion has some noise
        assert(np.all(cropped[0, 0] == [255, 1, 0]))
        # ensure the bottom right is black
        assert(np.all(cropped[199, 199] == [0, 0, 0]))

    def test_crop_yuv_portrait(self):
        bgr_frame = np.zeros((1920, 1080, 3), np.uint8)
        bgr_frame[:] = (0, 0, 255)
        bgr_frame[5:55, 5:55] = (255,0,0)
        # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
        yuv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2YUV_I420)

        cropped = yuv_region_2_rgb(yuv_frame, (0, 852, 648, 1500))
        # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))

if __name__ == '__main__':
    main(verbosity=2)
frigate/util.py (Normal file → Executable file, 374 lines)
@@ -1,26 +1,374 @@
import collections
import datetime
import hashlib
import json
import logging
import signal
import subprocess as sp
import threading
import time
import traceback
from abc import ABC, abstractmethod
from multiprocessing import shared_memory
from typing import AnyStr

import cv2
import matplotlib.pyplot as plt
import numpy as np

logger = logging.getLogger(__name__)

def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
    if color is None:
        color = (0,0,255)
    display_text = "{}: {}".format(label, info)
    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
    font_scale = 0.5
    font = cv2.FONT_HERSHEY_SIMPLEX
    # get the width and height of the text box
    size = cv2.getTextSize(display_text, font, fontScale=font_scale, thickness=2)
    text_width = size[0][0]
    text_height = size[0][1]
    line_height = text_height + size[1]
    # set the text start position
    if position == 'ul':
        text_offset_x = x_min
        text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
    elif position == 'ur':
        text_offset_x = x_max - (text_width+8)
        text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
    elif position == 'bl':
        text_offset_x = x_min
        text_offset_y = y_max
    elif position == 'br':
        text_offset_x = x_max - (text_width+8)
        text_offset_y = y_max
    # make the coords of the box with a small padding of two pixels
    textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
    cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)

def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
    # size is the longest edge and divisible by 4
    size = int(max(xmax-xmin, ymax-ymin)//4*4*multiplier)
    # dont go any smaller than 300
    if size < 300:
        size = 300

    # x_offset is midpoint of bounding box minus half the size
    x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
    # if outside the image
    if x_offset < 0:
        x_offset = 0
    elif x_offset > (frame_shape[1]-size):
        x_offset = max(0, (frame_shape[1]-size))

    # y_offset is midpoint of bounding box minus half the size
    y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
    # if outside the image
    if y_offset < 0:
        y_offset = 0
    elif y_offset > (frame_shape[0]-size):
        y_offset = max(0, (frame_shape[0]-size))

    return (x_offset, y_offset, x_offset+size, y_offset+size)

def get_yuv_crop(frame_shape, crop):
    # crop should be (x1,y1,x2,y2)
    frame_height = frame_shape[0]//3*2
    frame_width = frame_shape[1]

    # compute the width/height of the uv channels
    uv_width = frame_width//2 # width of the uv channels
    uv_height = frame_height//4 # height of the uv channels

    # compute the offset for upper left corner of the uv channels
    uv_x_offset = crop[0]//2 # x offset of the uv channels
    uv_y_offset = crop[1]//4 # y offset of the uv channels

    # compute the width/height of the uv crops
    uv_crop_width = (crop[2] - crop[0])//2 # width of the cropped uv channels
    uv_crop_height = (crop[3] - crop[1])//4 # height of the cropped uv channels

    # ensure crop dimensions are multiples of 2 and 4
    y = (crop[0], crop[1], crop[0] + uv_crop_width*2, crop[1] + uv_crop_height*4)

    u1 = (0 + uv_x_offset, frame_height + uv_y_offset,
          0 + uv_x_offset + uv_crop_width, frame_height + uv_y_offset + uv_crop_height)

    u2 = (uv_width + uv_x_offset, frame_height + uv_y_offset,
          uv_width + uv_x_offset + uv_crop_width, frame_height + uv_y_offset + uv_crop_height)

    v1 = (0 + uv_x_offset, frame_height + uv_height + uv_y_offset,
          0 + uv_x_offset + uv_crop_width, frame_height + uv_height + uv_y_offset + uv_crop_height)

    v2 = (uv_width + uv_x_offset, frame_height + uv_height + uv_y_offset,
          uv_width + uv_x_offset + uv_crop_width, frame_height + uv_height + uv_y_offset + uv_crop_height)

    return y, u1, u2, v1, v2

def yuv_region_2_rgb(frame, region):
    try:
        height = frame.shape[0]//3*2
        width = frame.shape[1]

        # get the crop box if the region extends beyond the frame
        crop_x1 = max(0, region[0])
        crop_y1 = max(0, region[1])
        # ensure these are a multiple of 4
        crop_x2 = min(width, region[2])
        crop_y2 = min(height, region[3])
        crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)

        y, u1, u2, v1, v2 = get_yuv_crop(frame.shape, crop_box)

        # if the region starts outside the frame, indent the start point in the cropped frame
        y_channel_x_offset = abs(min(0, region[0]))
        y_channel_y_offset = abs(min(0, region[1]))

        uv_channel_x_offset = y_channel_x_offset//2
        uv_channel_y_offset = y_channel_y_offset//4

        # create the yuv region frame
        # make sure the size is a multiple of 4
        size = (region[3] - region[1])//4*4
        yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
        # fill in black
        yuv_cropped_frame[:] = 128
        yuv_cropped_frame[0:size,0:size] = 16

        # copy the y channel
        yuv_cropped_frame[
            y_channel_y_offset:y_channel_y_offset + y[3] - y[1],
            y_channel_x_offset:y_channel_x_offset + y[2] - y[0]
        ] = frame[y[1]:y[3], y[0]:y[2]]

        uv_crop_width = u1[2] - u1[0]
        uv_crop_height = u1[3] - u1[1]

        # copy u1
        yuv_cropped_frame[
            size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
            0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
        ] = frame[u1[1]:u1[3], u1[0]:u1[2]]

        # copy u2
        yuv_cropped_frame[
            size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
            size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
        ] = frame[u2[1]:u2[3], u2[0]:u2[2]]

        # copy v1
        yuv_cropped_frame[
            size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
            0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
        ] = frame[v1[1]:v1[3], v1[0]:v1[2]]

        # copy v2
        yuv_cropped_frame[
            size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
            size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
        ] = frame[v2[1]:v2[3], v2[0]:v2[2]]

        return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
    except:
        print(f"frame.shape: {frame.shape}")
        print(f"region: {region}")
        raise

def intersection(box_a, box_b):
    return (
        max(box_a[0], box_b[0]),
        max(box_a[1], box_b[1]),
        min(box_a[2], box_b[2]),
        min(box_a[3], box_b[3])
    )

def area(box):
    return (box[2]-box[0] + 1)*(box[3]-box[1] + 1)

def intersection_over_union(box_a, box_b):
    # determine the (x, y)-coordinates of the intersection rectangle
    intersect = intersection(box_a, box_b)

    # compute the area of intersection rectangle
    inter_area = max(0, intersect[2] - intersect[0] + 1) * max(0, intersect[3] - intersect[1] + 1)

    if inter_area == 0:
        return 0.0

    # compute the area of both the prediction and ground-truth
    # rectangles
    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the interesection area
    iou = inter_area / float(box_a_area + box_b_area - inter_area)

    # return the intersection over union value
    return iou

def clipped(obj, frame_shape):
    # if the object is within 5 pixels of the region border, and the region is not on the edge
    # consider the object to be clipped
    box = obj[2]
    region = obj[4]
    if ((region[0] > 5 and box[0]-region[0] <= 5) or
        (region[1] > 5 and box[1]-region[1] <= 5) or
        (frame_shape[1]-region[2] > 5 and region[2]-box[2] <= 5) or
        (frame_shape[0]-region[3] > 5 and region[3]-box[3] <= 5)):
        return True
    else:
        return False

class EventsPerSecond:
    def __init__(self, max_events=1000):
        self._start = None
        self._max_events = max_events
        self._timestamps = []

    def start(self):
        self._start = datetime.datetime.now().timestamp()

    def update(self):
        if self._start is None:
            self.start()
        self._timestamps.append(datetime.datetime.now().timestamp())
        # truncate the list when it goes 100 over the max_size
        if len(self._timestamps) > self._max_events+100:
            self._timestamps = self._timestamps[(1-self._max_events):]

    def eps(self, last_n_seconds=10):
        if self._start is None:
            self.start()
        # compute the (approximate) events in the last n seconds
        now = datetime.datetime.now().timestamp()
        seconds = min(now-self._start, last_n_seconds)
        return len([t for t in self._timestamps if t > (now-last_n_seconds)]) / seconds

def print_stack(sig, frame):
    traceback.print_stack(frame)

def listen():
    signal.signal(signal.SIGUSR1, print_stack)

def create_mask(frame_shape, mask):
    mask_img = np.zeros(frame_shape, np.uint8)
    mask_img[:] = 255

    if isinstance(mask, list):
        for m in mask:
            add_mask(m, mask_img)

    elif isinstance(mask, str):
        add_mask(mask, mask_img)

    return mask_img

def add_mask(mask, mask_img):
    points = mask.split(',')
    contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
    cv2.fillPoly(mask_img, pts=[contour], color=(0))

class FrameManager(ABC):
    @abstractmethod
    def create(self, name, size) -> AnyStr:
        pass

    @abstractmethod
    def get(self, name, timeout_ms=0):
        pass

    @abstractmethod
    def close(self, name):
        pass

    @abstractmethod
    def delete(self, name):
        pass

class DictFrameManager(FrameManager):
    def __init__(self):
        self.frames = {}

    def create(self, name, size) -> AnyStr:
        mem = bytearray(size)
        self.frames[name] = mem
        return mem

    def get(self, name, shape):
        mem = self.frames[name]
        return np.ndarray(shape, dtype=np.uint8, buffer=mem)

    def close(self, name):
        pass

    def delete(self, name):
        del self.frames[name]

class SharedMemoryFrameManager(FrameManager):
    def __init__(self):
        self.shm_store = {}

    def create(self, name, size) -> AnyStr:
        shm = shared_memory.SharedMemory(name=name, create=True, size=size)
        self.shm_store[name] = shm
        return shm.buf

    def get(self, name, shape):
        if name in self.shm_store:
            shm = self.shm_store[name]
        else:
            shm = shared_memory.SharedMemory(name=name)
            self.shm_store[name] = shm
        return np.ndarray(shape, dtype=np.uint8, buffer=shm.buf)

    def close(self, name):
        if name in self.shm_store:
            self.shm_store[name].close()
            del self.shm_store[name]

    def delete(self, name):
        if name in self.shm_store:
            self.shm_store[name].close()
            self.shm_store[name].unlink()
            del self.shm_store[name]
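A minimal, self-contained sketch (not from the diff; it assumes the frigate package and its dependencies are importable) of the SharedMemoryFrameManager round trip that video.py relies on: a producer writes raw bytes into a named segment and a consumer maps the same name back into a numpy array. The name and shape below are arbitrary examples.

    import numpy as np

    from frigate.util import SharedMemoryFrameManager

    frame_shape = (360, 640)                      # e.g. the Y plane of a small frame
    frame_size = frame_shape[0] * frame_shape[1]

    manager = SharedMemoryFrameManager()
    buf = manager.create("camera1234.5", frame_size)   # returns a writable shared buffer
    buf[:] = bytes(frame_size)                          # producer copies raw frame bytes in

    frame = manager.get("camera1234.5", frame_shape)    # consumer views it as a numpy array
    assert frame.dtype == np.uint8 and frame.shape == frame_shape

    manager.delete("camera1234.5")                      # closes and unlinks the segment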
frigate/video.py (Normal file → Executable file, 686 lines)
@@ -1,323 +1,429 @@
import os
|
import base64
|
||||||
import time
|
import copy
|
||||||
import datetime
|
|
||||||
import cv2
|
|
||||||
import threading
|
|
||||||
import ctypes
|
import ctypes
|
||||||
|
import datetime
|
||||||
|
import itertools
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
import multiprocessing as mp
|
import multiprocessing as mp
|
||||||
|
import os
|
||||||
|
import queue
|
||||||
import subprocess as sp
|
import subprocess as sp
|
||||||
|
import signal
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
from collections import defaultdict
|
||||||
|
from setproctitle import setproctitle
|
||||||
|
from typing import Dict, List
|
||||||
|
|
||||||
|
import cv2
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from . util import tonumpyarray, draw_box_with_label
|
|
||||||
from . object_detection import FramePrepper
|
|
||||||
from . objects import ObjectCleaner, BestPersonFrame
|
|
||||||
from . mqtt import MqttObjectPublisher
|
|
||||||
|
|
||||||
# Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
|
from frigate.config import CameraConfig
|
||||||
class FrameTracker(threading.Thread):
|
from frigate.edgetpu import RemoteObjectDetector
|
||||||
def __init__(self, shared_frame, frame_time, frame_ready, frame_lock, recent_frames):
|
from frigate.log import LogPipe
|
||||||
threading.Thread.__init__(self)
|
from frigate.motion import MotionDetector
|
||||||
self.shared_frame = shared_frame
|
from frigate.objects import ObjectTracker
|
||||||
self.frame_time = frame_time
|
from frigate.util import (EventsPerSecond, FrameManager,
|
||||||
self.frame_ready = frame_ready
|
SharedMemoryFrameManager, area, calculate_region,
|
||||||
self.frame_lock = frame_lock
|
clipped, draw_box_with_label, intersection,
|
||||||
self.recent_frames = recent_frames
|
intersection_over_union, listen, yuv_region_2_rgb)
|
||||||
|
|
||||||
def run(self):
|
logger = logging.getLogger(__name__)
|
||||||
frame_time = 0.0
|
|
||||||
while True:
|
|
||||||
now = datetime.datetime.now().timestamp()
|
|
||||||
# wait for a frame
|
|
||||||
with self.frame_ready:
|
|
||||||
# if there isnt a frame ready for processing or it is old, wait for a signal
|
|
||||||
if self.frame_time.value == frame_time or (now - self.frame_time.value) > 0.5:
|
|
||||||
self.frame_ready.wait()
|
|
||||||
|
|
||||||
# lock and make a copy of the frame
|
|
||||||
with self.frame_lock:
|
|
||||||
frame = self.shared_frame.copy()
|
|
||||||
frame_time = self.frame_time.value
|
|
||||||
|
|
||||||
# add the frame to recent frames
|
|
||||||
self.recent_frames[frame_time] = frame
|
|
||||||
|
|
||||||
# delete any old frames
|
def filtered(obj, objects_to_track, object_filters):
|
||||||
stored_frame_times = list(self.recent_frames.keys())
|
object_name = obj[0]
|
||||||
for k in stored_frame_times:
|
|
||||||
if (now - k) > 2:
|
|
||||||
del self.recent_frames[k]
|
|
||||||
|
|
||||||
def get_frame_shape(rtsp_url):
|
if not object_name in objects_to_track:
|
||||||
# capture a single frame and check the frame shape so the correct array
|
return True
|
||||||
# size can be allocated in memory
|
|
||||||
video = cv2.VideoCapture(rtsp_url)
|
if object_name in object_filters:
|
||||||
ret, frame = video.read()
|
obj_settings = object_filters[object_name]
|
||||||
frame_shape = frame.shape
|
|
||||||
video.release()
|
|
||||||
return frame_shape
|
|
||||||
|
|
||||||
def get_rtsp_url(rtsp_config):
|
# if the min area is larger than the
|
||||||
if (rtsp_config['password'].startswith('$')):
|
# detected object, don't add it to detected objects
|
||||||
rtsp_config['password'] = os.getenv(rtsp_config['password'][1:])
|
if obj_settings.min_area > obj[3]:
|
||||||
return 'rtsp://{}:{}@{}:{}{}'.format(rtsp_config['user'],
|
return True
|
||||||
rtsp_config['password'], rtsp_config['host'], rtsp_config['port'],
|
|
||||||
rtsp_config['path'])
|
# if the detected object is larger than the
|
||||||
|
# max area, don't add it to detected objects
|
||||||
|
if obj_settings.max_area < obj[3]:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# if the score is lower than the min_score, skip
|
||||||
|
if obj_settings.min_score > obj[1]:
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not obj_settings.mask is None:
|
||||||
|
# compute the coordinates of the object and make sure
|
||||||
|
# the location isnt outside the bounds of the image (can happen from rounding)
|
||||||
|
y_location = min(int(obj[2][3]), len(obj_settings.mask)-1)
|
||||||
|
x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(obj_settings.mask[0])-1)
|
||||||
|
|
||||||
|
# if the object is in a masked location, don't add it to detected objects
|
||||||
|
if obj_settings.mask[y_location][x_location] == 0:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
def create_tensor_input(frame, model_shape, region):
|
||||||
|
cropped_frame = yuv_region_2_rgb(frame, region)
|
||||||
|
|
||||||
|
# Resize to 300x300 if needed
|
||||||
|
if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
|
||||||
|
cropped_frame = cv2.resize(cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR)
|
||||||
|
|
||||||
|
# Expand dimensions since the model expects images to have shape: [1, height, width, 3]
|
||||||
|
return np.expand_dims(cropped_frame, axis=0)
|
||||||
|
|
||||||
|
def stop_ffmpeg(ffmpeg_process, logger):
|
||||||
|
logger.info("Terminating the existing ffmpeg process...")
|
||||||
|
ffmpeg_process.terminate()
|
||||||
|
try:
|
||||||
|
logger.info("Waiting for ffmpeg to exit gracefully...")
|
||||||
|
ffmpeg_process.communicate(timeout=30)
|
||||||
|
except sp.TimeoutExpired:
|
||||||
|
logger.info("FFmpeg didnt exit. Force killing...")
|
||||||
|
ffmpeg_process.kill()
|
||||||
|
ffmpeg_process.communicate()
|
||||||
|
ffmpeg_process = None
|
||||||
|
|
||||||
|
def start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None):
|
||||||
|
if not ffmpeg_process is None:
|
||||||
|
stop_ffmpeg(ffmpeg_process, logger)
|
||||||
|
|
||||||
|
if frame_size is None:
|
||||||
|
process = sp.Popen(ffmpeg_cmd, stdout = sp.DEVNULL, stderr=logpipe, stdin = sp.DEVNULL, start_new_session=True)
|
||||||
|
else:
|
||||||
|
process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stderr=logpipe, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
|
||||||
|
return process
|
||||||
|
|
||||||
|
def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
|
||||||
|
frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):
|
||||||
|
|
||||||
|
frame_size = frame_shape[0] * frame_shape[1]
|
||||||
|
frame_rate = EventsPerSecond()
|
||||||
|
frame_rate.start()
|
||||||
|
skipped_eps = EventsPerSecond()
|
||||||
|
skipped_eps.start()
|
||||||
|
while True:
|
||||||
|
fps.value = frame_rate.eps()
|
||||||
|
skipped_fps = skipped_eps.eps()
|
||||||
|
|
||||||
|
current_frame.value = datetime.datetime.now().timestamp()
|
||||||
|
frame_name = f"{camera_name}{current_frame.value}"
|
||||||
|
frame_buffer = frame_manager.create(frame_name, frame_size)
|
||||||
|
try:
|
||||||
|
frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
|
||||||
|
except Exception as e:
|
||||||
|
logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
|
||||||
|
|
||||||
|
if ffmpeg_process.poll() != None:
|
||||||
|
logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
|
||||||
|
frame_manager.delete(frame_name)
|
||||||
|
break
|
||||||
|
continue
|
||||||
|
|
||||||
|
frame_rate.update()
|
||||||
|
|
||||||
|
# if the queue is full, skip this frame
|
||||||
|
if frame_queue.full():
|
||||||
|
skipped_eps.update()
|
||||||
|
frame_manager.delete(frame_name)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# close the frame
|
||||||
|
frame_manager.close(frame_name)
|
||||||
|
|
||||||
|
# add to the queue
|
||||||
|
frame_queue.put(current_frame.value)
|
||||||
|
|
||||||
class CameraWatchdog(threading.Thread):
|
class CameraWatchdog(threading.Thread):
|
||||||
def __init__(self, camera):
|
def __init__(self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event):
|
||||||
threading.Thread.__init__(self)
|
threading.Thread.__init__(self)
|
||||||
self.camera = camera
|
self.logger = logging.getLogger(f"watchdog.{camera_name}")
|
||||||
|
self.camera_name = camera_name
|
||||||
def run(self):
|
|
||||||
|
|
||||||
while True:
|
|
||||||
# wait a bit before checking
|
|
||||||
time.sleep(10)
|
|
||||||
|
|
||||||
if (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > 2:
|
|
||||||
print("last frame is more than 2 seconds old, restarting camera capture...")
|
|
||||||
self.camera.start_or_restart_capture()
|
|
||||||
time.sleep(5)
|
|
||||||
|
|
||||||
# Thread to read the stdout of the ffmpeg process and update the current frame
|
|
||||||
class CameraCapture(threading.Thread):
|
|
||||||
def __init__(self, camera):
|
|
||||||
threading.Thread.__init__(self)
|
|
||||||
self.camera = camera
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
frame_num = 0
|
|
||||||
while True:
|
|
||||||
if self.camera.ffmpeg_process.poll() != None:
|
|
||||||
print("ffmpeg process is not running. exiting capture thread...")
|
|
||||||
break
|
|
||||||
|
|
||||||
raw_image = self.camera.ffmpeg_process.stdout.read(self.camera.frame_size)
|
|
||||||
|
|
||||||
if len(raw_image) == 0:
|
|
||||||
print("ffmpeg didnt return a frame. something is wrong. exiting capture thread...")
|
|
||||||
break
|
|
||||||
|
|
||||||
frame_num += 1
|
|
||||||
if (frame_num % self.camera.take_frame) != 0:
|
|
||||||
continue
|
|
||||||
|
|
||||||
with self.camera.frame_lock:
|
|
||||||
self.camera.frame_time.value = datetime.datetime.now().timestamp()
|
|
||||||
|
|
||||||
self.camera.current_frame[:] = (
|
|
||||||
np
|
|
||||||
.frombuffer(raw_image, np.uint8)
|
|
||||||
.reshape(self.camera.frame_shape)
|
|
||||||
)
|
|
||||||
# Notify with the condition that a new frame is ready
|
|
||||||
with self.camera.frame_ready:
|
|
||||||
self.camera.frame_ready.notify_all()
|
|
||||||
|
|
||||||
class Camera:
|
|
||||||
def __init__(self, name, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
|
|
||||||
self.name = name
|
|
||||||
self.config = config
|
self.config = config
|
||||||
self.detected_objects = []
|
|
||||||
self.recent_frames = {}
|
|
||||||
self.rtsp_url = get_rtsp_url(self.config['rtsp'])
|
|
||||||
self.take_frame = self.config.get('take_frame', 1)
|
|
||||||
self.ffmpeg_hwaccel_args = self.config.get('ffmpeg_hwaccel_args', [])
|
|
||||||
self.regions = self.config['regions']
|
|
||||||
self.frame_shape = get_frame_shape(self.rtsp_url)
|
|
||||||
self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
|
|
||||||
self.mqtt_client = mqtt_client
|
|
||||||
self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
|
|
||||||
|
|
||||||
# create a numpy array for the current frame in initialize to zeros
|
|
||||||
self.current_frame = np.zeros(self.frame_shape, np.uint8)
|
|
||||||
# create shared value for storing the frame_time
|
|
||||||
self.frame_time = mp.Value('d', 0.0)
|
|
||||||
# Lock to control access to the frame
|
|
||||||
self.frame_lock = mp.Lock()
|
|
||||||
# Condition for notifying that a new frame is ready
|
|
||||||
self.frame_ready = mp.Condition()
|
|
||||||
# Condition for notifying that objects were parsed
|
|
||||||
self.objects_parsed = mp.Condition()
|
|
||||||
|
|
||||||
self.ffmpeg_process = None
|
|
||||||
self.capture_thread = None
|
self.capture_thread = None
|
||||||
|
self.ffmpeg_detect_process = None
|
||||||
|
self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
|
||||||
|
self.ffmpeg_other_processes = []
|
||||||
|
self.camera_fps = camera_fps
|
||||||
|
self.ffmpeg_pid = ffmpeg_pid
|
||||||
|
self.frame_queue = frame_queue
|
||||||
|
self.frame_shape = self.config.frame_shape_yuv
|
||||||
|
self.frame_size = self.frame_shape[0] * self.frame_shape[1]
|
||||||
|
self.stop_event = stop_event
|
||||||
|
|
||||||
# for each region, create a separate thread to resize the region and prep for detection
|
def run(self):
|
||||||
self.detection_prep_threads = []
|
self.start_ffmpeg_detect()
|
||||||
for region in self.config['regions']:
|
|
||||||
# set a default threshold of 0.5 if not defined
|
for c in self.config.ffmpeg_cmds:
|
||||||
if not 'threshold' in region:
|
if 'detect' in c['roles']:
|
||||||
region['threshold'] = 0.5
|
continue
|
||||||
if not isinstance(region['threshold'], float):
|
logpipe = LogPipe(f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}", logging.ERROR)
|
||||||
print('Threshold is not a float. Setting to 0.5 default.')
|
self.ffmpeg_other_processes.append({
|
||||||
region['threshold'] = 0.5
|
'cmd': c['cmd'],
|
||||||
self.detection_prep_threads.append(FramePrepper(
|
'logpipe': logpipe,
|
||||||
self.name,
|
'process': start_or_restart_ffmpeg(c['cmd'], self.logger, logpipe)
|
||||||
self.current_frame,
|
})
|
||||||
self.frame_time,
|
|
||||||
self.frame_ready,
|
|
||||||
self.frame_lock,
|
|
||||||
region['size'], region['x_offset'], region['y_offset'], region['threshold'],
|
|
||||||
prepped_frame_queue
|
|
||||||
))
|
|
||||||
|
|
||||||
# start a thread to store recent motion frames for processing
|
time.sleep(10)
|
||||||
self.frame_tracker = FrameTracker(self.current_frame, self.frame_time,
|
while True:
|
||||||
self.frame_ready, self.frame_lock, self.recent_frames)
|
if self.stop_event.is_set():
|
||||||
self.frame_tracker.start()
|
stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
|
||||||
|
for p in self.ffmpeg_other_processes:
|
||||||
|
stop_ffmpeg(p['process'], self.logger)
|
||||||
|
p['logpipe'].close()
|
||||||
|
self.logpipe.close()
|
||||||
|
break
|
||||||
|
|
||||||
# start a thread to store the highest scoring recent person frame
|
now = datetime.datetime.now().timestamp()
|
||||||
self.best_person_frame = BestPersonFrame(self.objects_parsed, self.recent_frames, self.detected_objects)
|
|
||||||
self.best_person_frame.start()
|
|
||||||
|
|
||||||
# start a thread to expire objects from the detected objects list
|
if not self.capture_thread.is_alive():
|
||||||
self.object_cleaner = ObjectCleaner(self.objects_parsed, self.detected_objects)
|
self.start_ffmpeg_detect()
|
||||||
self.object_cleaner.start()
|
elif now - self.capture_thread.current_frame.value > 20:
|
||||||
|
self.logger.info(f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg...")
|
||||||
# start a thread to publish object scores (currently only person)
|
self.ffmpeg_detect_process.terminate()
|
||||||
mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self.objects_parsed, self.detected_objects)
|
try:
|
||||||
mqtt_publisher.start()
|
self.logger.info("Waiting for ffmpeg to exit gracefully...")
|
||||||
|
self.ffmpeg_detect_process.communicate(timeout=30)
|
||||||
# create a watchdog thread for capture process
|
except sp.TimeoutExpired:
|
||||||
self.watchdog = CameraWatchdog(self)
|
self.logger.info("FFmpeg didnt exit. Force killing...")
|
||||||
|
self.ffmpeg_detect_process.kill()
|
||||||
# load in the mask for person detection
|
self.ffmpeg_detect_process.communicate()
|
||||||
if 'mask' in self.config:
|
|
||||||
self.mask = cv2.imread("/config/{}".format(self.config['mask']), cv2.IMREAD_GRAYSCALE)
|
|
||||||
else:
|
|
||||||
self.mask = None
|
|
||||||
|
|
||||||
if self.mask is None:
|
|
||||||
self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
|
|
||||||
self.mask[:] = 255
|
|
||||||
|
|
||||||
|
|
||||||
def start_or_restart_capture(self):
|
|
||||||
if not self.ffmpeg_process is None:
|
|
||||||
print("Killing the existing ffmpeg process...")
|
|
||||||
self.ffmpeg_process.kill()
|
|
||||||
self.ffmpeg_process.wait()
|
|
||||||
print("Waiting for the capture thread to exit...")
|
|
||||||
self.capture_thread.join()
|
|
||||||
self.ffmpeg_process = None
|
|
||||||
self.capture_thread = None
|
|
||||||
|
|
||||||
# create the process to capture frames from the RTSP stream and store in a shared array
|
for p in self.ffmpeg_other_processes:
|
||||||
print("Creating a new ffmpeg process...")
|
poll = p['process'].poll()
|
||||||
self.start_ffmpeg()
|
if poll == None:
|
||||||
|
continue
|
||||||
print("Creating a new capture thread...")
|
p['process'] = start_or_restart_ffmpeg(p['cmd'], self.logger, p['logpipe'], ffmpeg_process=p['process'])
|
||||||
self.capture_thread = CameraCapture(self)
|
|
||||||
print("Starting a new capture thread...")
|
# wait a bit before checking again
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
def start_ffmpeg_detect(self):
|
||||||
|
ffmpeg_cmd = [c['cmd'] for c in self.config.ffmpeg_cmds if 'detect' in c['roles']][0]
|
||||||
|
self.ffmpeg_detect_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.logger, self.logpipe, self.frame_size)
|
||||||
|
self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
|
||||||
|
self.capture_thread = CameraCapture(self.camera_name, self.ffmpeg_detect_process, self.frame_shape, self.frame_queue,
|
||||||
|
self.camera_fps)
|
||||||
self.capture_thread.start()
|
self.capture_thread.start()
|
||||||
|
|
||||||
|
class CameraCapture(threading.Thread):
|
||||||
|
def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
|
||||||
|
threading.Thread.__init__(self)
|
||||||
|
self.name = f"capture:{camera_name}"
|
||||||
|
self.camera_name = camera_name
|
||||||
|
self.frame_shape = frame_shape
|
||||||
|
self.frame_queue = frame_queue
|
||||||
|
self.fps = fps
|
||||||
|
self.skipped_fps = EventsPerSecond()
|
||||||
|
self.frame_manager = SharedMemoryFrameManager()
|
||||||
|
self.ffmpeg_process = ffmpeg_process
|
||||||
|
self.current_frame = mp.Value('d', 0.0)
|
||||||
|
self.last_frame = 0
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
self.skipped_fps.start()
|
||||||
|
capture_frames(self.ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue,
|
||||||
|
self.fps, self.skipped_fps, self.current_frame)
|
||||||
|
|
||||||
|
def capture_camera(name, config: CameraConfig, process_info):
|
||||||
|
stop_event = mp.Event()
|
||||||
|
def receiveSignal(signalNumber, frame):
|
||||||
|
stop_event.set()
|
||||||
|
|
||||||
    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    frame_queue = process_info['frame_queue']
    camera_watchdog = CameraWatchdog(name, config, frame_queue, process_info['camera_fps'], process_info['ffmpeg_pid'], stop_event)
    camera_watchdog.start()
    camera_watchdog.join()

    # removed lines (old implementation)
    def start_ffmpeg(self):
        ffmpeg_global_args = [
            '-hide_banner', '-loglevel', 'panic'
        ]

        ffmpeg_input_args = [
            '-avoid_negative_ts', 'make_zero',
            '-fflags', 'nobuffer',
            '-flags', 'low_delay',
            '-strict', 'experimental',
            '-fflags', '+genpts',
            '-rtsp_transport', 'tcp',
            '-stimeout', '5000000',
            '-use_wallclock_as_timestamps', '1'
        ]

        ffmpeg_cmd = (['ffmpeg'] +
            ffmpeg_global_args +
            self.ffmpeg_hwaccel_args +
            ffmpeg_input_args +
            ['-i', self.rtsp_url,
            '-f', 'rawvideo',
            '-pix_fmt', 'rgb24',
            'pipe:'])

        print(" ".join(ffmpeg_cmd))

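Because the old command ends with `-f rawvideo -pix_fmt rgb24 pipe:`, ffmpeg writes unencoded RGB frames straight to stdout and the reader only has to slice the stream into fixed-size chunks. A sketch of how such a pipe is consumed (resolution and URL are placeholders):

```python
import subprocess as sp
import numpy as np

width, height = 1280, 720
frame_size = width * height * 3  # rgb24: 3 bytes per pixel

cmd = ["ffmpeg", "-hide_banner", "-loglevel", "panic",
       "-i", "rtsp://example/stream",           # placeholder URL
       "-f", "rawvideo", "-pix_fmt", "rgb24", "pipe:"]
proc = sp.Popen(cmd, stdout=sp.PIPE, bufsize=frame_size)

raw = proc.stdout.read(frame_size)              # exactly one frame worth of bytes
frame = np.frombuffer(raw, np.uint8).reshape((height, width, 3))
```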
def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
    stop_event = mp.Event()
    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"process:{name}"
    setproctitle(f"frigate.process:{name}")
    listen()

    frame_queue = process_info['frame_queue']
    detection_enabled = process_info['detection_enabled']

    frame_shape = config.frame_shape
    objects_to_track = config.objects.track
    object_filters = config.objects.filters

    motion_detector = MotionDetector(frame_shape, config.motion)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)

    object_tracker = ObjectTracker(config.detect)

    frame_manager = SharedMemoryFrameManager()

    process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)

    logger.info(f"{name}: exiting subprocess")

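The `process_info` dict handed to `track_camera` is largely made of `multiprocessing.Value` doubles (`process_fps`, `detection_fps`, `detection_frame`); they are how the per-camera process reports stats back to the parent without a queue. A minimal, self-contained illustration of that sharing (names here are generic, not frigate's):

```python
import multiprocessing as mp
import time

def worker(fps_value):
    # the child updates the shared double in place
    for i in range(5):
        fps_value.value = float(i)
        time.sleep(0.1)

if __name__ == "__main__":
    fps = mp.Value('d', 0.0)          # same pattern as process_fps / detection_fps
    p = mp.Process(target=worker, args=(fps,))
    p.start()
    time.sleep(0.25)
    print(f"fps seen by parent: {fps.value}")  # parent observes the child's updates
    p.join()
```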
def reduce_boxes(boxes):
    if len(boxes) == 0:
        return []
    reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
    return [tuple(b) for b in reduced_boxes]

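`reduce_boxes` leans on an OpenCV quirk: `cv2.groupRectangles` with `groupThreshold=1` discards any rectangle that has no partner, so chaining the list to itself guarantees every box has at least one near-duplicate; lone boxes survive while overlapping ones collapse into one. A quick check of that behaviour (coordinates invented for the example):

```python
import cv2
import itertools

boxes = [(10, 10, 110, 110), (14, 12, 118, 116), (400, 400, 500, 500)]

# without the duplication, the isolated third box would be dropped entirely
grouped, _ = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)
print([tuple(b) for b in grouped])  # two boxes: the overlapping pair merged, plus the lone box
```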
def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
    tensor_input = create_tensor_input(frame, model_shape, region)

    detections = []
    region_detections = object_detector.detect(tensor_input)
    for d in region_detections:
        box = d[2]
        size = region[2]-region[0]
        x_min = int((box[1] * size) + region[0])
        y_min = int((box[0] * size) + region[1])
        x_max = int((box[3] * size) + region[0])
        y_max = int((box[2] * size) + region[1])
        det = (d[0],
            d[1],
            (x_min, y_min, x_max, y_max),
            (x_max-x_min)*(y_max-y_min),
            region)
        # apply object filters
        if filtered(det, objects_to_track, object_filters):
            continue
        detections.append(det)
    return detections

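The detector returns boxes normalized to the square region that was cropped and resized, in `[y_min, x_min, y_max, x_max]` order, so `detect()` scales by the region's edge length and shifts by its top-left corner. A worked example with made-up numbers:

```python
# region is (x_min, y_min, x_max, y_max) in frame coordinates
region = (100, 200, 400, 500)          # a 300x300 crop of the frame
box = [0.10, 0.25, 0.60, 0.75]         # detector output: [y_min, x_min, y_max, x_max], normalized

size = region[2] - region[0]               # 300
x_min = int((box[1] * size) + region[0])   # 0.25*300 + 100 = 175
y_min = int((box[0] * size) + region[1])   # 0.10*300 + 200 = 230
x_max = int((box[3] * size) + region[0])   # 0.75*300 + 100 = 325
y_max = int((box[2] * size) + region[1])   # 0.60*300 + 200 = 380
area = (x_max - x_min) * (y_max - y_min)   # 150 * 150 = 22500
print((x_min, y_min, x_max, y_max), area)
```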
def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_shape,
    frame_manager: FrameManager, motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue, process_info: Dict,
    objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
    exit_on_empty: bool = False):

    fps = process_info['process_fps']
    detection_fps = process_info['detection_fps']
    current_frame_time = process_info['detection_frame']

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    while True:
        if stop_event.is_set():
            break

        if exit_on_empty and frame_queue.empty():
            logger.info(f"Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))

        if frame is None:
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        if not detection_enabled.value:
            fps.value = fps_tracker.eps()
            object_tracker.match_and_update(frame_time, [])
            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
            detection_fps.value = object_detector.fps.eps()
            frame_manager.close(f"{camera_name}{frame_time}")
            continue

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_object_boxes = [obj['box'] for obj in object_tracker.tracked_objects.values()]

        # combine motion boxes with known locations of existing objects
        combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

        # compute regions
        regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in combined_boxes]

        # combine overlapping regions
        combined_regions = reduce_boxes(regions)

        # re-compute regions
        regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
            for a in combined_regions]

        # resize regions and detect
        detections = []
        for region in regions:
            detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
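One detail worth calling out from the loop above: frames are fetched from shared memory with shape `(frame_shape[0]*3//2, frame_shape[1])` because the detect stream in this version is planar YUV420 rather than the old rgb24 pipe; a YUV420 buffer stores the full-resolution Y plane plus quarter-resolution U and V planes, i.e. 1.5 bytes per pixel. The arithmetic, spelled out:

```python
width, height = 1280, 720              # frame_shape is (height, width)

y_plane = width * height               # 921600 bytes, full-resolution luma
u_plane = (width // 2) * (height // 2) # 230400 bytes, quarter-resolution chroma
v_plane = (width // 2) * (height // 2) # 230400 bytes

total = y_plane + u_plane + v_plane    # 1382400 == width * height * 3 // 2
rows = height * 3 // 2                 # 1080 rows of `width` bytes: the shape used above
print(total, (rows, width))
```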
        #########
        # merge objects, check for clipped objects and look again up to 4 times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2]-o[2][0], o[2][3]-o[2][1])
                    for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape,
                            box[0], box[1],
                            box[2], box[3])

                        regions.append(region)

                        selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))

                        refining = True
                    else:
                        selected_objects.append(obj)
                # set the detections list to only include top, complete objects
                # and new detections
                detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue if not full
        if(detected_objects_queue.full()):
            frame_manager.delete(f"{camera_name}{frame_time}")
            continue
        else:
            fps_tracker.update()
            fps.value = fps_tracker.eps()
            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
            detection_fps.value = object_detector.fps.eps()
            frame_manager.close(f"{camera_name}{frame_time}")

        # removed lines (old implementation, continued)
        self.ffmpeg_process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=self.frame_size)

    def start(self):
        self.start_or_restart_capture()
        # start the object detection prep threads
        for detection_prep_thread in self.detection_prep_threads:
            detection_prep_thread.start()
        self.watchdog.start()

    def join(self):
        self.capture_thread.join()

    def get_capture_pid(self):
        return self.ffmpeg_process.pid

    def add_objects(self, objects):
        if len(objects) == 0:
            return

        for obj in objects:
            # Store object area to use in bounding box labels
            obj['area'] = (obj['xmax']-obj['xmin'])*(obj['ymax']-obj['ymin'])

            if obj['name'] == 'person':
                # find the matching region
                region = None
                for r in self.regions:
                    if (
                        obj['xmin'] >= r['x_offset'] and
                        obj['ymin'] >= r['y_offset'] and
                        obj['xmax'] <= r['x_offset']+r['size'] and
                        obj['ymax'] <= r['y_offset']+r['size']
                    ):
                        region = r
                        break

                # if the min person area is larger than the
                # detected person, don't add it to detected objects
                if region and 'min_person_area' in region and region['min_person_area'] > obj['area']:
                    continue

                # compute the coordinates of the person and make sure
                # the location isnt outside the bounds of the image (can happen from rounding)
                y_location = min(int(obj['ymax']), len(self.mask)-1)
                x_location = min(int((obj['xmax']-obj['xmin'])/2.0)+obj['xmin'], len(self.mask[0])-1)

                # if the person is in a masked location, continue
                if self.mask[y_location][x_location] == [0]:
                    continue

            self.detected_objects.append(obj)

        with self.objects_parsed:
            self.objects_parsed.notify_all()

    def get_best_person(self):
        return self.best_person_frame.best_frame

    def get_current_frame_with_objects(self):
        # make a copy of the current detected objects
        detected_objects = self.detected_objects.copy()
        # lock and make a copy of the current frame
        with self.frame_lock:
            frame = self.current_frame.copy()

        # draw the bounding boxes on the screen
        for obj in detected_objects:
            label = "{}: {}% {}".format(obj['name'],int(obj['score']*100),int(obj['area']))
            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)

        for region in self.regions:
            color = (255,255,255)
            cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
                (region['x_offset']+region['size'], region['y_offset']+region['size']),
                color, 2)

        # convert to BGR
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        return frame

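`cv2.dnn.NMSBoxes` does the per-label de-duplication in the refining loop: it takes `(x, y, w, h)` boxes plus confidences and returns the indices of the boxes that survive the score threshold (0.5 here) and the IoU threshold (0.4). A small illustration with invented detections:

```python
import cv2
import numpy as np

# two heavily overlapping boxes plus one separate one, as (x, y, w, h)
boxes = [(100, 100, 80, 160), (104, 98, 82, 164), (400, 120, 60, 140)]
confidences = [0.90, 0.62, 0.75]

idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
# older OpenCV returns a column vector (hence group[index[0]] above), newer a flat array
keep = [boxes[i] for i in np.array(idxs).flatten()]
print(keep)  # the low-confidence duplicate of the first box is suppressed
```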
36
frigate/watchdog.py
Normal file
@@ -0,0 +1,36 @@
import datetime
import logging
import threading
import time

logger = logging.getLogger(__name__)

class FrigateWatchdog(threading.Thread):
    def __init__(self, detectors, stop_event):
        threading.Thread.__init__(self)
        self.name = 'frigate_watchdog'
        self.detectors = detectors
        self.stop_event = stop_event

    def run(self):
        time.sleep(10)
        while True:
            # wait a bit before checking
            time.sleep(10)

            if self.stop_event.is_set():
                logger.info(f"Exiting watchdog...")
                break

            now = datetime.datetime.now().timestamp()

            # check the detection processes
            for detector in self.detectors.values():
                detection_start = detector.detection_start.value
                if (detection_start > 0.0 and
                    now - detection_start > 10):
                    logger.info("Detection appears to be stuck. Restarting detection process")
                    detector.start_or_restart()
                elif not detector.detect_process.is_alive():
                    logger.info("Detection appears to have stopped. Restarting detection process")
                    detector.start_or_restart()

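For context, the watchdog expects a dict of detector handles that expose `detection_start` (a shared timestamp), `detect_process` (a live process), and `start_or_restart()`. The detector below is a stand-in stub to show that interface, not frigate's actual detector class:

```python
import multiprocessing as mp
import time

class StubDetector:
    """Minimal object satisfying what FrigateWatchdog expects."""
    def __init__(self):
        self.detection_start = mp.Value('d', 0.0)   # timestamp of the in-flight detection
        self.detect_process = mp.Process(target=time.sleep, args=(3600,))
        self.detect_process.start()

    def start_or_restart(self):
        print("restarting detection process")

stop_event = mp.Event()
watchdog = FrigateWatchdog({'coral': StubDetector()}, stop_event)
watchdog.start()

# ... on shutdown:
stop_event.set()
watchdog.join()
```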
58
frigate/zeroconf.py
Normal file
@@ -0,0 +1,58 @@
import logging
import socket

from zeroconf import (
    ServiceInfo,
    NonUniqueNameException,
    InterfaceChoice,
    IPVersion,
    Zeroconf,
)

logger = logging.getLogger(__name__)

ZEROCONF_TYPE = "_frigate._tcp.local."

# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> str:
    """Try to determine the local IP address of the machine."""
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        # Use Google Public DNS server to determine own IP
        sock.connect(("8.8.8.8", 80))

        return sock.getsockname()[0]  # type: ignore
    except OSError:
        try:
            return socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            return "127.0.0.1"
    finally:
        sock.close()

def broadcast_zeroconf(frigate_id):
    zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)

    host_ip = get_local_ip()

    try:
        host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
    except OSError:
        host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)

    info = ServiceInfo(
        ZEROCONF_TYPE,
        name=f"{frigate_id}.{ZEROCONF_TYPE}",
        addresses=[host_ip_pton],
        port=5000,
    )

    logger.info("Starting Zeroconf broadcast")
    try:
        zeroconf.register_service(info)
    except NonUniqueNameException:
        logger.error(
            "Frigate instance with identical name present in the local network"
        )
    return zeroconf

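`broadcast_zeroconf` returns the live `Zeroconf` handle so the caller can keep the registration alive for the lifetime of the process and tear it down on exit. A brief usage sketch (the identifier string is arbitrary):

```python
zc = broadcast_zeroconf("frigate-example")   # advertises _frigate._tcp.local. on port 5000

try:
    ...   # run the app; the service stays registered while this process lives
finally:
    zc.close()   # unregisters the service and shuts the zeroconf listener down
```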
80
labelmap.txt
Normal file
@@ -0,0 +1,80 @@
0 person
1 bicycle
2 car
3 motorcycle
4 airplane
5 bus
6 train
7 car
8 boat
9 traffic light
10 fire hydrant
12 stop sign
13 parking meter
14 bench
15 bird
16 cat
17 dog
18 horse
19 sheep
20 cow
21 elephant
22 bear
23 zebra
24 giraffe
26 backpack
27 umbrella
30 handbag
31 tie
32 suitcase
33 frisbee
34 skis
35 snowboard
36 sports ball
37 kite
38 baseball bat
39 baseball glove
40 skateboard
41 surfboard
42 tennis racket
43 bottle
45 wine glass
46 cup
47 fork
48 knife
49 spoon
50 bowl
51 banana
52 apple
53 sandwich
54 orange
55 broccoli
56 carrot
57 hot dog
58 pizza
59 donut
60 cake
61 chair
62 couch
63 potted plant
64 bed
66 dining table
69 toilet
71 tv
72 laptop
73 mouse
74 remote
75 keyboard
76 cell phone
77 microwave
78 oven
79 toaster
80 sink
81 refrigerator
83 book
84 clock
85 vase
86 scissors
87 teddy bear
88 hair drier
89 toothbrush
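This is the label file referenced as `/labelmap.txt` by `RemoteObjectDetector` above: each line is an integer COCO class id followed by the label, with gaps where classes were dropped (and id 7, truck in stock COCO, listed as car here). A minimal parser sketch showing how such a file maps to a dict (frigate's own loader may differ):

```python
def load_labels(path="/labelmap.txt"):
    labels = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            idx, label = line.split(maxsplit=1)   # "9 traffic light" -> ("9", "traffic light")
            labels[int(idx)] = label
    return labels

labels = load_labels()
print(labels[0], labels[9])   # "person", "traffic light"
```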
41
migrations/001_create_events_table.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
"""Peewee migrations -- 001_create_events_table.py.
|
||||||
|
|
||||||
|
Some examples (model - class or model name)::
|
||||||
|
|
||||||
|
> Model = migrator.orm['model_name'] # Return model in current state by name
|
||||||
|
|
||||||
|
> migrator.sql(sql) # Run custom SQL
|
||||||
|
> migrator.python(func, *args, **kwargs) # Run python code
|
||||||
|
> migrator.create_model(Model) # Create a model (could be used as decorator)
|
||||||
|
> migrator.remove_model(model, cascade=True) # Remove a model
|
||||||
|
> migrator.add_fields(model, **fields) # Add fields to a model
|
||||||
|
> migrator.change_fields(model, **fields) # Change fields
|
||||||
|
> migrator.remove_fields(model, *field_names, cascade=True)
|
||||||
|
> migrator.rename_field(model, old_field_name, new_field_name)
|
||||||
|
> migrator.rename_table(model, new_table_name)
|
||||||
|
> migrator.add_index(model, *col_names, unique=False)
|
||||||
|
> migrator.drop_index(model, *col_names)
|
||||||
|
> migrator.add_not_null(model, *field_names)
|
||||||
|
> migrator.drop_not_null(model, *field_names)
|
||||||
|
> migrator.add_default(model, field_name, default)
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import datetime as dt
|
||||||
|
import peewee as pw
|
||||||
|
from decimal import ROUND_HALF_EVEN
|
||||||
|
|
||||||
|
try:
|
||||||
|
import playhouse.postgres_ext as pw_pext
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
SQL = pw.SQL
|
||||||
|
|
||||||
|
def migrate(migrator, database, fake=False, **kwargs):
|
||||||
|
migrator.sql('CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)')
|
||||||
|
migrator.sql('CREATE INDEX IF NOT EXISTS "event_label" ON "event" ("label")')
|
||||||
|
migrator.sql('CREATE INDEX IF NOT EXISTS "event_camera" ON "event" ("camera")')
|
||||||
|
|
||||||
|
def rollback(migrator, database, fake=False, **kwargs):
|
||||||
|
pass
|
||||||
41
migrations/002_add_clip_snapshot.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
"""Peewee migrations -- 002_add_clip_snapshot.py.
|
||||||
|
|
||||||
|
Some examples (model - class or model name)::
|
||||||
|
|
||||||
|
> Model = migrator.orm['model_name'] # Return model in current state by name
|
||||||
|
|
||||||
|
> migrator.sql(sql) # Run custom SQL
|
||||||
|
> migrator.python(func, *args, **kwargs) # Run python code
|
||||||
|
> migrator.create_model(Model) # Create a model (could be used as decorator)
|
||||||
|
> migrator.remove_model(model, cascade=True) # Remove a model
|
||||||
|
> migrator.add_fields(model, **fields) # Add fields to a model
|
||||||
|
> migrator.change_fields(model, **fields) # Change fields
|
||||||
|
> migrator.remove_fields(model, *field_names, cascade=True)
|
||||||
|
> migrator.rename_field(model, old_field_name, new_field_name)
|
||||||
|
> migrator.rename_table(model, new_table_name)
|
||||||
|
> migrator.add_index(model, *col_names, unique=False)
|
||||||
|
> migrator.drop_index(model, *col_names)
|
||||||
|
> migrator.add_not_null(model, *field_names)
|
||||||
|
> migrator.drop_not_null(model, *field_names)
|
||||||
|
> migrator.add_default(model, field_name, default)
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import datetime as dt
|
||||||
|
import peewee as pw
|
||||||
|
from decimal import ROUND_HALF_EVEN
|
||||||
|
from frigate.models import Event
|
||||||
|
|
||||||
|
try:
|
||||||
|
import playhouse.postgres_ext as pw_pext
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
SQL = pw.SQL
|
||||||
|
|
||||||
|
|
||||||
|
def migrate(migrator, database, fake=False, **kwargs):
|
||||||
|
migrator.add_fields(Event, has_clip=pw.BooleanField(default=True), has_snapshot=pw.BooleanField(default=True))
|
||||||
|
|
||||||
|
def rollback(migrator, database, fake=False, **kwargs):
|
||||||
|
migrator.remove_fields(Event, ['has_clip', 'has_snapshot'])
|
||||||
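Migration modules like the two above are normally applied by a peewee_migrate-style `Router` rather than run by hand. The snippet below shows the generic way such a directory is applied with peewee_migrate; the database path is an assumption for illustration, not frigate's actual configuration:

```python
from peewee import SqliteDatabase
from peewee_migrate import Router

db = SqliteDatabase("/media/frigate/frigate.db")   # assumed path, for illustration only
router = Router(db, migrate_dir="migrations")

router.run()   # applies 001_create_events_table, then 002_add_clip_snapshot, in order
```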
134
nginx/nginx.conf
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
worker_processes 1;
|
||||||
|
|
||||||
|
error_log /var/log/nginx/error.log warn;
|
||||||
|
pid /var/run/nginx.pid;
|
||||||
|
|
||||||
|
load_module "modules/ngx_rtmp_module.so";
|
||||||
|
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
http {
|
||||||
|
include /etc/nginx/mime.types;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
|
||||||
|
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||||
|
'$status $body_bytes_sent "$http_referer" '
|
||||||
|
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||||
|
|
||||||
|
access_log /var/log/nginx/access.log main;
|
||||||
|
|
||||||
|
sendfile on;
|
||||||
|
|
||||||
|
keepalive_timeout 65;
|
||||||
|
|
||||||
|
upstream frigate_api {
|
||||||
|
server localhost:5001;
|
||||||
|
keepalive 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 5000;
|
||||||
|
|
||||||
|
location /stream/ {
|
||||||
|
add_header 'Cache-Control' 'no-cache';
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Expose-Headers' 'Content-Length';
|
||||||
|
if ($request_method = 'OPTIONS') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin";
|
||||||
|
add_header 'Access-Control-Max-Age' 1728000;
|
||||||
|
add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||||
|
add_header 'Content-Length' 0;
|
||||||
|
return 204;
|
||||||
|
}
|
||||||
|
|
||||||
|
types {
|
||||||
|
application/dash+xml mpd;
|
||||||
|
application/vnd.apple.mpegurl m3u8;
|
||||||
|
video/mp2t ts;
|
||||||
|
image/jpeg jpg;
|
||||||
|
}
|
||||||
|
|
||||||
|
root /tmp;
|
||||||
|
}
|
||||||
|
|
||||||
|
location /clips/ {
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Expose-Headers' 'Content-Length';
|
||||||
|
if ($request_method = 'OPTIONS') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin";
|
||||||
|
add_header 'Access-Control-Max-Age' 1728000;
|
||||||
|
add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||||
|
add_header 'Content-Length' 0;
|
||||||
|
return 204;
|
||||||
|
}
|
||||||
|
|
||||||
|
types {
|
||||||
|
video/mp4 mp4;
|
||||||
|
image/jpeg jpg;
|
||||||
|
}
|
||||||
|
|
||||||
|
autoindex on;
|
||||||
|
root /media/frigate;
|
||||||
|
}
|
||||||
|
|
||||||
|
location /recordings/ {
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin" always;
|
||||||
|
add_header 'Access-Control-Allow-Credentials' 'true';
|
||||||
|
add_header 'Access-Control-Expose-Headers' 'Content-Length';
|
||||||
|
if ($request_method = 'OPTIONS') {
|
||||||
|
add_header 'Access-Control-Allow-Origin' "$http_origin";
|
||||||
|
add_header 'Access-Control-Max-Age' 1728000;
|
||||||
|
add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||||
|
add_header 'Content-Length' 0;
|
||||||
|
return 204;
|
||||||
|
}
|
||||||
|
|
||||||
|
types {
|
||||||
|
video/mp4 mp4;
|
||||||
|
}
|
||||||
|
|
||||||
|
autoindex on;
|
||||||
|
autoindex_format json;
|
||||||
|
root /media/frigate;
|
||||||
|
}
|
||||||
|
|
||||||
|
location /api/ {
|
||||||
|
add_header 'Access-Control-Allow-Origin' '*';
|
||||||
|
proxy_pass http://frigate_api/;
|
||||||
|
proxy_pass_request_headers on;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
}
|
||||||
|
|
||||||
|
location / {
|
||||||
|
sub_filter 'href="/' 'href="$http_x_ingress_path/';
|
||||||
|
sub_filter 'url(/' 'url($http_x_ingress_path/';
|
||||||
|
sub_filter '"/js/' '"$http_x_ingress_path/js/';
|
||||||
|
sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path";</script>';
|
||||||
|
sub_filter_types text/css application/javascript;
|
||||||
|
sub_filter_once off;
|
||||||
|
root /opt/frigate/web;
|
||||||
|
try_files $uri $uri/ /index.html;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rtmp {
|
||||||
|
server {
|
||||||
|
listen 1935;
|
||||||
|
chunk_size 4096;
|
||||||
|
allow publish 127.0.0.1;
|
||||||
|
deny publish all;
|
||||||
|
allow play all;
|
||||||
|
application live {
|
||||||
|
live on;
|
||||||
|
record off;
|
||||||
|
meta copy;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
4
run.sh
Normal file
@@ -0,0 +1,4 @@
#!/usr/bin/env bash

service nginx start
exec python3 -u -m frigate
@@ -1,50 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
CPU_ARCH=$(uname -m)
|
|
||||||
OS_VERSION=$(uname -v)
|
|
||||||
|
|
||||||
echo "CPU_ARCH ${CPU_ARCH}"
|
|
||||||
echo "OS_VERSION ${OS_VERSION}"
|
|
||||||
|
|
||||||
if [[ "${CPU_ARCH}" == "x86_64" ]]; then
|
|
||||||
echo "Recognized as Linux on x86_64."
|
|
||||||
LIBEDGETPU_SUFFIX=x86_64
|
|
||||||
HOST_GNU_TYPE=x86_64-linux-gnu
|
|
||||||
elif [[ "${CPU_ARCH}" == "armv7l" ]]; then
|
|
||||||
echo "Recognized as Linux on ARM32 platform."
|
|
||||||
LIBEDGETPU_SUFFIX=arm32
|
|
||||||
HOST_GNU_TYPE=arm-linux-gnueabihf
|
|
||||||
elif [[ "${CPU_ARCH}" == "aarch64" ]]; then
|
|
||||||
echo "Recognized as generic ARM64 platform."
|
|
||||||
LIBEDGETPU_SUFFIX=arm64
|
|
||||||
HOST_GNU_TYPE=aarch64-linux-gnu
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -z "${HOST_GNU_TYPE}" ]]; then
|
|
||||||
echo "Your platform is not supported."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Using maximum operating frequency."
|
|
||||||
LIBEDGETPU_SRC="libedgetpu/libedgetpu_${LIBEDGETPU_SUFFIX}.so"
|
|
||||||
LIBEDGETPU_DST="/usr/lib/${HOST_GNU_TYPE}/libedgetpu.so.1.0"
|
|
||||||
|
|
||||||
# Runtime library.
|
|
||||||
echo "Installing Edge TPU runtime library [${LIBEDGETPU_DST}]..."
|
|
||||||
if [[ -f "${LIBEDGETPU_DST}" ]]; then
|
|
||||||
echo "File already exists. Replacing it..."
|
|
||||||
rm -f "${LIBEDGETPU_DST}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cp -p "${LIBEDGETPU_SRC}" "${LIBEDGETPU_DST}"
|
|
||||||
ldconfig
|
|
||||||
echo "Done."
|
|
||||||
|
|
||||||
# Python API.
|
|
||||||
WHEEL=$(ls edgetpu-*-py3-none-any.whl 2>/dev/null)
|
|
||||||
if [[ $? == 0 ]]; then
|
|
||||||
echo "Installing Edge TPU Python API..."
|
|
||||||
python3 -m pip install --no-deps "${WHEEL}"
|
|
||||||
echo "Done."
|
|
||||||
fi
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D986B59D
|
|
||||||
|
|
||||||
echo "deb http://deb.odroid.in/5422-s bionic main" > /etc/apt/sources.list.d/odroid.list
|
|
||||||
1
web/.dockerignore
Normal file
@@ -0,0 +1 @@
|
|||||||
|
node_modules
|
||||||
8
web/README.md
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Frigate Web UI
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
1. Build the docker images in the root of the repository `make amd64_all` (or appropriate for your system)
|
||||||
|
2. Create a config file in `config/`
|
||||||
|
3. Run the container: `docker run --rm --name frigate --privileged -v $PWD/config:/config:ro -v /etc/localtime:/etc/localtime:ro -p 5000:5000 frigate`
|
||||||
|
4. Run the dev ui: `cd web && npm run start`
|
||||||
8497
web/package-lock.json
generated
Normal file
24
web/package.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"name": "frigate",
|
||||||
|
"private": true,
|
||||||
|
"scripts": {
|
||||||
|
"start": "cross-env SNOWPACK_PUBLIC_API_HOST=http://localhost:5000 snowpack dev",
|
||||||
|
"prebuild": "rimraf build",
|
||||||
|
"build": "snowpack build"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"@prefresh/snowpack": "^3.0.1",
|
||||||
|
"@snowpack/plugin-optimize": "^0.2.13",
|
||||||
|
"@snowpack/plugin-postcss": "^1.1.0",
|
||||||
|
"@snowpack/plugin-webpack": "^2.3.0",
|
||||||
|
"autoprefixer": "^10.2.1",
|
||||||
|
"cross-env": "^7.0.3",
|
||||||
|
"postcss": "^8.2.2",
|
||||||
|
"postcss-cli": "^8.3.1",
|
||||||
|
"preact": "^10.5.9",
|
||||||
|
"preact-router": "^3.2.1",
|
||||||
|
"rimraf": "^3.0.2",
|
||||||
|
"snowpack": "^3.0.0",
|
||||||
|
"tailwindcss": "^2.0.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
8
web/postcss.config.js
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
'use strict';
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
plugins: [
|
||||||
|
require('tailwindcss'),
|
||||||
|
require('autoprefixer'),
|
||||||
|
],
|
||||||
|
};
|
||||||
BIN
web/public/android-chrome-192x192.png
Normal file
|
After Width: | Height: | Size: 3.1 KiB |
BIN
web/public/android-chrome-512x512.png
Normal file
|
After Width: | Height: | Size: 6.9 KiB |
BIN
web/public/apple-touch-icon.png
Normal file
|
After Width: | Height: | Size: 3.3 KiB |
BIN
web/public/favicon-16x16.png
Normal file
|
After Width: | Height: | Size: 558 B |
BIN
web/public/favicon-32x32.png
Normal file
|
After Width: | Height: | Size: 800 B |
BIN
web/public/favicon.ico
Normal file
|
After Width: | Height: | Size: 15 KiB |
BIN
web/public/favicon.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
21
web/public/index.html
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||||
|
<link rel="icon" href="/favicon.ico" />
|
||||||
|
<title>Frigate</title>
|
||||||
|
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png" />
|
||||||
|
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png" />
|
||||||
|
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png" />
|
||||||
|
<link rel="manifest" href="/site.webmanifest" />
|
||||||
|
<link rel="mask-icon" href="/safari-pinned-tab.svg" color="#3b82f7" />
|
||||||
|
<meta name="msapplication-TileColor" content="#3b82f7" />
|
||||||
|
<meta name="theme-color" content="#ff0000" />
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div id="root"></div>
|
||||||
|
<noscript>You need to enable JavaScript to run this app.</noscript>
|
||||||
|
<script type="module" src="/dist/index.js"></script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
BIN
web/public/mstile-150x150.png
Normal file
|
After Width: | Height: | Size: 2.6 KiB |
46
web/public/safari-pinned-tab.svg
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
<?xml version="1.0" standalone="no"?>
|
||||||
|
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
|
||||||
|
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
|
||||||
|
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
|
||||||
|
width="888.000000pt" height="888.000000pt" viewBox="0 0 888.000000 888.000000"
|
||||||
|
preserveAspectRatio="xMidYMid meet">
|
||||||
|
<metadata>
|
||||||
|
Created by potrace 1.11, written by Peter Selinger 2001-2013
|
||||||
|
</metadata>
|
||||||
|
<g transform="translate(0.000000,888.000000) scale(0.100000,-0.100000)"
|
||||||
|
fill="#000000" stroke="none">
|
||||||
|
<path d="M8228 8865 c-2 -2 -25 -6 -53 -9 -38 -5 -278 -56 -425 -91 -33 -7
|
||||||
|
-381 -98 -465 -121 -49 -14 -124 -34 -165 -45 -67 -18 -485 -138 -615 -176
|
||||||
|
-50 -14 -106 -30 -135 -37 -8 -2 -35 -11 -60 -19 -25 -8 -85 -27 -135 -42 -49
|
||||||
|
-14 -101 -31 -115 -36 -14 -5 -34 -11 -45 -13 -11 -3 -65 -19 -120 -36 -55
|
||||||
|
-18 -127 -40 -160 -50 -175 -53 -247 -77 -550 -178 -364 -121 -578 -200 -820
|
||||||
|
-299 -88 -36 -214 -88 -280 -115 -66 -27 -129 -53 -140 -58 -11 -5 -67 -29
|
||||||
|
-125 -54 -342 -144 -535 -259 -579 -343 -34 -66 7 -145 156 -299 229 -238 293
|
||||||
|
-316 340 -413 38 -80 41 -152 10 -281 -57 -234 -175 -543 -281 -732 -98 -174
|
||||||
|
-172 -239 -341 -297 -116 -40 -147 -52 -210 -80 -107 -49 -179 -107 -290 -236
|
||||||
|
-51 -59 -179 -105 -365 -131 -19 -2 -48 -7 -65 -9 -16 -3 -50 -8 -75 -11 -69
|
||||||
|
-9 -130 -39 -130 -63 0 -24 31 -46 78 -56 18 -4 139 -8 270 -10 250 -4 302
|
||||||
|
-11 335 -44 19 -18 19 -23 7 -46 -19 -36 -198 -121 -490 -233 -850 -328 -914
|
||||||
|
-354 -1159 -473 -185 -90 -337 -186 -395 -249 -60 -65 -67 -107 -62 -350 3
|
||||||
|
-113 7 -216 10 -230 3 -14 7 -52 10 -85 7 -70 14 -128 21 -170 2 -16 7 -48 10
|
||||||
|
-70 3 -22 11 -64 16 -94 6 -30 12 -64 14 -75 1 -12 5 -34 9 -51 3 -16 8 -39
|
||||||
|
10 -50 12 -57 58 -258 71 -310 9 -33 18 -69 20 -79 25 -110 138 -416 216 -582
|
||||||
|
21 -47 39 -87 39 -90 0 -7 217 -438 261 -521 109 -201 293 -501 347 -564 11
|
||||||
|
-13 37 -44 56 -68 69 -82 126 -109 160 -75 26 25 14 65 -48 164 -138 218 -142
|
||||||
|
245 -138 800 2 206 4 488 5 625 1 138 -1 293 -6 345 -28 345 -28 594 -1 760
|
||||||
|
12 69 54 187 86 235 33 52 188 212 293 302 98 84 108 93 144 121 19 15 52 42
|
||||||
|
75 61 78 64 302 229 426 313 248 169 483 297 600 326 53 14 205 6 365 -17 33
|
||||||
|
-5 155 -8 270 -6 179 3 226 7 316 28 58 13 140 25 182 26 82 2 120 6 217 22
|
||||||
|
73 12 97 16 122 18 12 1 23 21 38 70 l20 68 74 -17 c81 -20 155 -30 331 -45
|
||||||
|
69 -6 132 -8 715 -20 484 -11 620 -8 729 16 85 19 131 63 98 96 -25 26 -104
|
||||||
|
34 -302 32 -373 -2 -408 -1 -471 26 -90 37 2 102 171 120 33 3 76 8 95 10 19
|
||||||
|
2 71 7 115 10 243 17 267 20 338 37 145 36 47 102 -203 137 -136 19 -262 25
|
||||||
|
-490 22 -124 -2 -362 -4 -530 -4 l-305 -1 -56 26 c-65 31 -171 109 -238 176
|
||||||
|
-52 51 -141 173 -141 191 0 6 -6 22 -14 34 -18 27 -54 165 -64 244 -12 98 -6
|
||||||
|
322 12 414 9 47 29 127 45 176 26 80 58 218 66 278 1 11 6 47 10 80 3 33 8 70
|
||||||
|
10 83 2 13 7 53 11 90 3 37 8 74 9 83 22 118 22 279 -1 464 -20 172 -20 172
|
||||||
|
70 238 108 79 426 248 666 355 25 11 77 34 115 52 92 42 443 191 570 242 55
|
||||||
|
22 109 44 120 48 24 11 130 52 390 150 199 75 449 173 500 195 17 7 118 50
|
||||||
|
225 95 237 100 333 143 490 220 229 113 348 191 337 223 -3 10 -70 20 -79 12z"/>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.9 KiB |
19
web/public/site.webmanifest
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"name": "",
|
||||||
|
"short_name": "",
|
||||||
|
"icons": [
|
||||||
|
{
|
||||||
|
"src": "/android-chrome-192x192.png",
|
||||||
|
"sizes": "192x192",
|
||||||
|
"type": "image/png"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"src": "/android-chrome-512x512.png",
|
||||||
|
"sizes": "512x512",
|
||||||
|
"type": "image/png"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"theme_color": "#ff0000",
|
||||||
|
"background_color": "#ff0000",
|
||||||
|
"display": "standalone"
|
||||||
|
}
|
||||||
31
web/snowpack.config.js
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
'use strict';
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
mount: {
|
||||||
|
public: { url: '/', static: true },
|
||||||
|
src: { url: '/dist' },
|
||||||
|
},
|
||||||
|
plugins: [
|
||||||
|
'@snowpack/plugin-postcss',
|
||||||
|
'@prefresh/snowpack',
|
||||||
|
[
|
||||||
|
'@snowpack/plugin-optimize',
|
||||||
|
{
|
||||||
|
preloadModules: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
[
|
||||||
|
'@snowpack/plugin-webpack',
|
||||||
|
{
|
||||||
|
sourceMap: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
],
|
||||||
|
routes: [{ match: 'routes', src: '.*', dest: '/index.html' }],
|
||||||
|
packageOptions: {
|
||||||
|
sourcemap: false,
|
||||||
|
},
|
||||||
|
buildOptions: {
|
||||||
|
sourcemap: true,
|
||||||
|
},
|
||||||
|
};
|
||||||
43
web/src/App.jsx
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
import { h } from 'preact';
|
||||||
|
import Camera from './Camera';
|
||||||
|
import CameraMap from './CameraMap';
|
||||||
|
import Cameras from './Cameras';
|
||||||
|
import Debug from './Debug';
|
||||||
|
import Event from './Event';
|
||||||
|
import Events from './Events';
|
||||||
|
import { Router } from 'preact-router';
|
||||||
|
import Sidebar from './Sidebar';
|
||||||
|
import { ApiHost, Config } from './context';
|
||||||
|
import { useContext, useEffect, useState } from 'preact/hooks';
|
||||||
|
|
||||||
|
export default function App() {
|
||||||
|
const apiHost = useContext(ApiHost);
|
||||||
|
const [config, setConfig] = useState(null);
|
||||||
|
|
||||||
|
useEffect(async () => {
|
||||||
|
const response = await fetch(`${apiHost}/api/config`);
|
||||||
|
const data = response.ok ? await response.json() : {};
|
||||||
|
setConfig(data);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return !config ? (
|
||||||
|
<div />
|
||||||
|
) : (
|
||||||
|
<Config.Provider value={config}>
|
||||||
|
<div className="md:flex flex-col md:flex-row md:min-h-screen w-full bg-gray-100 dark:bg-gray-800 text-gray-900 dark:text-white">
|
||||||
|
<Sidebar />
|
||||||
|
<div className="p-4 min-w-0">
|
||||||
|
<Router>
|
||||||
|
<CameraMap path="/cameras/:camera/editor" />
|
||||||
|
<Camera path="/cameras/:camera" />
|
||||||
|
<Event path="/events/:eventId" />
|
||||||
|
<Events path="/events" />
|
||||||
|
<Debug path="/debug" />
|
||||||
|
<Cameras path="/" />
|
||||||
|
</Router>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</Config.Provider>
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
72
web/src/Camera.jsx
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import { h } from 'preact';
|
||||||
|
import Box from './components/Box';
|
||||||
|
import Heading from './components/Heading';
|
||||||
|
import Link from './components/Link';
|
||||||
|
import Switch from './components/Switch';
|
||||||
|
import { route } from 'preact-router';
|
||||||
|
import { useCallback, useContext } from 'preact/hooks';
|
||||||
|
import { ApiHost, Config } from './context';
|
||||||
|
|
||||||
|
export default function Camera({ camera, url }) {
|
||||||
|
const config = useContext(Config);
|
||||||
|
const apiHost = useContext(ApiHost);
|
||||||
|
|
||||||
|
if (!(camera in config.cameras)) {
|
||||||
|
return <div>{`No camera named ${camera}`}</div>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const cameraConfig = config.cameras[camera];
|
||||||
|
|
||||||
|
const { pathname, searchParams } = new URL(`${window.location.protocol}//${window.location.host}${url}`);
|
||||||
|
const searchParamsString = searchParams.toString();
|
||||||
|
|
||||||
|
const handleSetOption = useCallback(
|
||||||
|
(id, value) => {
|
||||||
|
searchParams.set(id, value ? 1 : 0);
|
||||||
|
route(`${pathname}?${searchParams.toString()}`, true);
|
||||||
|
},
|
||||||
|
[searchParams]
|
||||||
|
);
|
||||||
|
|
||||||
|
function getBoolean(id) {
|
||||||
|
return Boolean(parseInt(searchParams.get(id), 10));
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-4">
|
||||||
|
<Heading size="2xl">{camera}</Heading>
|
||||||
|
<Box>
|
||||||
|
<img
|
||||||
|
width={cameraConfig.width}
|
||||||
|
height={cameraConfig.height}
|
||||||
|
key={searchParamsString}
|
||||||
|
src={`${apiHost}/api/${camera}?${searchParamsString}`}
|
||||||
|
/>
|
||||||
|
</Box>
|
||||||
|
|
||||||
|
<Box className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 p-4">
|
||||||
|
<Switch checked={getBoolean('bbox')} id="bbox" label="Bounding box" onChange={handleSetOption} />
|
||||||
|
<Switch checked={getBoolean('timestamp')} id="timestamp" label="Timestamp" onChange={handleSetOption} />
|
||||||
|
<Switch checked={getBoolean('zones')} id="zones" label="Zones" onChange={handleSetOption} />
|
||||||
|
<Switch checked={getBoolean('mask')} id="mask" label="Masks" onChange={handleSetOption} />
|
||||||
|
<Switch checked={getBoolean('motion')} id="motion" label="Motion boxes" onChange={handleSetOption} />
|
||||||
|
<Switch checked={getBoolean('regions')} id="regions" label="Regions" onChange={handleSetOption} />
|
||||||
|
<Link href={`/cameras/${camera}/editor`}>Mask & Zone creator</Link>
|
||||||
|
</Box>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<Heading size="sm">Tracked objects</Heading>
|
||||||
|
<div className="grid grid-cols-3 md:grid-cols-4 gap-4">
|
||||||
|
{cameraConfig.objects.track.map((objectType) => {
|
||||||
|
return (
|
||||||
|
<Box key={objectType} hover href={`/events?camera=${camera}&label=${objectType}`}>
|
||||||
|
<Heading size="sm">{objectType}</Heading>
|
||||||
|
<img src={`${apiHost}/api/${camera}/${objectType}/best.jpg?crop=1&h=150`} />
|
||||||
|
</Box>
|
||||||
|
);
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
598
web/src/CameraMap.jsx
Normal file
@@ -0,0 +1,598 @@
|
|||||||
|
import { h } from 'preact';
|
||||||
|
import Box from './components/Box';
|
||||||
|
import Button from './components/Button';
|
||||||
|
import Heading from './components/Heading';
|
||||||
|
import Switch from './components/Switch';
|
||||||
|
import { route } from 'preact-router';
|
||||||
|
import { useCallback, useContext, useEffect, useMemo, useRef, useState } from 'preact/hooks';
|
||||||
|
import { ApiHost, Config } from './context';
|
||||||
|
|
||||||
|
export default function CameraMasks({ camera, url }) {
|
||||||
|
const config = useContext(Config);
|
||||||
|
const apiHost = useContext(ApiHost);
|
||||||
|
const imageRef = useRef(null);
|
||||||
|
const [imageScale, setImageScale] = useState(1);
|
||||||
|
const [snap, setSnap] = useState(true);
|
||||||
|
|
||||||
|
if (!(camera in config.cameras)) {
|
||||||
|
return <div>{`No camera named ${camera}`}</div>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const cameraConfig = config.cameras[camera];
|
||||||
|
const {
|
||||||
|
width,
|
||||||
|
height,
|
||||||
|
motion: { mask: motionMask },
|
||||||
|
objects: { filters: objectFilters },
|
||||||
|
zones,
|
||||||
|
} = cameraConfig;
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (!imageRef.current) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const scaledWidth = imageRef.current.width;
|
||||||
|
const scale = scaledWidth / width;
|
||||||
|
setImageScale(scale);
|
||||||
|
}, [imageRef.current, setImageScale]);
|
||||||
|
|
||||||
|
const [motionMaskPoints, setMotionMaskPoints] = useState(
|
||||||
|
Array.isArray(motionMask)
|
||||||
|
? motionMask.map((mask) => getPolylinePoints(mask))
|
||||||
|
: motionMask
|
||||||
|
? [getPolylinePoints(motionMask)]
|
||||||
|
: []
|
||||||
|
);
|
||||||
|
|
||||||
|
const [zonePoints, setZonePoints] = useState(
|
||||||
|
Object.keys(zones).reduce((memo, zone) => ({ ...memo, [zone]: getPolylinePoints(zones[zone].coordinates) }), {})
|
||||||
|
);
|
||||||
|
|
||||||
|
const [objectMaskPoints, setObjectMaskPoints] = useState(
|
||||||
|
Object.keys(objectFilters).reduce(
|
||||||
|
(memo, name) => ({
|
||||||
|
...memo,
|
||||||
|
[name]: Array.isArray(objectFilters[name].mask)
|
||||||
|
? objectFilters[name].mask.map((mask) => getPolylinePoints(mask))
|
||||||
|
: objectFilters[name].mask
|
||||||
|
? [getPolylinePoints(objectFilters[name].mask)]
|
||||||
|
: [],
|
||||||
|
}),
|
||||||
|
{}
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
const [editing, setEditing] = useState({ set: motionMaskPoints, key: 0, fn: setMotionMaskPoints });
|
||||||
|
|
||||||
|
const handleUpdateEditable = useCallback(
|
||||||
|
(newPoints) => {
|
||||||
|
let newSet;
|
||||||
|
if (Array.isArray(editing.set)) {
|
||||||
|
newSet = [...editing.set];
|
||||||
|
newSet[editing.key] = newPoints;
|
||||||
|
} else if (editing.subkey !== undefined) {
|
||||||
|
newSet = { ...editing.set };
|
||||||
|
newSet[editing.key][editing.subkey] = newPoints;
|
||||||
|
} else {
|
||||||
|
newSet = { ...editing.set, [editing.key]: newPoints };
|
||||||
|
}
|
||||||
|
editing.set = newSet;
|
||||||
|
editing.fn(newSet);
|
||||||
|
},
|
||||||
|
[editing]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleSelectEditable = useCallback(
|
||||||
|
(name) => {
|
||||||
|
setEditing(name);
|
||||||
|
},
|
||||||
|
[setEditing]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleRemoveEditable = useCallback(
|
||||||
|
(name) => {
|
||||||
|
const filteredZonePoints = Object.keys(zonePoints)
|
||||||
|
.filter((zoneName) => zoneName !== name)
|
||||||
|
.reduce((memo, name) => {
|
||||||
|
memo[name] = zonePoints[name];
|
||||||
|
return memo;
|
||||||
|
}, {});
|
||||||
|
setZonePoints(filteredZonePoints);
|
||||||
|
},
|
||||||
|
[zonePoints, setZonePoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
// Motion mask methods
|
||||||
|
const handleAddMask = useCallback(() => {
|
||||||
|
const newMotionMaskPoints = [...motionMaskPoints, []];
|
||||||
|
setMotionMaskPoints(newMotionMaskPoints);
|
||||||
|
setEditing({ set: newMotionMaskPoints, key: newMotionMaskPoints.length - 1, fn: setMotionMaskPoints });
|
||||||
|
}, [motionMaskPoints, setMotionMaskPoints]);
|
||||||
|
|
||||||
|
const handleEditMask = useCallback(
|
||||||
|
(key) => {
|
||||||
|
setEditing({ set: motionMaskPoints, key, fn: setMotionMaskPoints });
|
||||||
|
},
|
||||||
|
[setEditing, motionMaskPoints, setMotionMaskPoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleRemoveMask = useCallback(
|
||||||
|
(key) => {
|
||||||
|
const newMotionMaskPoints = [...motionMaskPoints];
|
||||||
|
newMotionMaskPoints.splice(key, 1);
|
||||||
|
setMotionMaskPoints(newMotionMaskPoints);
|
||||||
|
},
|
||||||
|
[motionMaskPoints, setMotionMaskPoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleCopyMotionMasks = useCallback(async () => {
|
||||||
|
await window.navigator.clipboard.writeText(` motion:
|
||||||
|
mask:
|
||||||
|
${motionMaskPoints.map((mask, i) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`);
|
||||||
|
}, [motionMaskPoints]);
|
||||||
|
|
||||||
|
// Zone methods
|
||||||
|
const handleEditZone = useCallback(
|
||||||
|
(key) => {
|
||||||
|
setEditing({ set: zonePoints, key, fn: setZonePoints });
|
||||||
|
},
|
||||||
|
[setEditing, zonePoints, setZonePoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleAddZone = useCallback(() => {
|
||||||
|
const n = Object.keys(zonePoints).filter((name) => name.startsWith('zone_')).length;
|
||||||
|
const zoneName = `zone_${n}`;
|
||||||
|
const newZonePoints = { ...zonePoints, [zoneName]: [] };
|
||||||
|
setZonePoints(newZonePoints);
|
||||||
|
setEditing({ set: newZonePoints, key: zoneName, fn: setZonePoints });
|
||||||
|
}, [zonePoints, setZonePoints]);
|
||||||
|
|
||||||
|
const handleRemoveZone = useCallback(
|
||||||
|
(key) => {
|
||||||
|
const newZonePoints = { ...zonePoints };
|
||||||
|
delete newZonePoints[key];
|
||||||
|
setZonePoints(newZonePoints);
|
||||||
|
},
|
||||||
|
[zonePoints, setZonePoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleCopyZones = useCallback(async () => {
|
||||||
|
await window.navigator.clipboard.writeText(` zones:
|
||||||
|
${Object.keys(zonePoints)
|
||||||
|
.map(
|
||||||
|
(zoneName) => ` ${zoneName}:
|
||||||
|
coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`
|
||||||
|
)
|
||||||
|
.join('\n')}`);
|
||||||
|
}, [zonePoints]);
|
||||||
|
|
||||||
|
// Object methods
|
||||||
|
const handleEditObjectMask = useCallback(
|
||||||
|
(key, subkey) => {
|
||||||
|
setEditing({ set: objectMaskPoints, key, subkey, fn: setObjectMaskPoints });
|
||||||
|
},
|
||||||
|
[setEditing, objectMaskPoints, setObjectMaskPoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleAddObjectMask = useCallback(() => {
|
||||||
|
const n = Object.keys(objectMaskPoints).filter((name) => name.startsWith('object_')).length;
|
||||||
|
const newObjectName = `object_${n}`;
|
||||||
|
const newObjectMaskPoints = { ...objectMaskPoints, [newObjectName]: [] };
|
||||||
|
setObjectMaskPoints(newObjectMaskPoints);
|
||||||
|
setEditing({ set: newObjectMaskPoints, key: newObjectName, subkey: 0, fn: setObjectMaskPoints });
|
||||||
|
}, [objectMaskPoints, setObjectMaskPoints, setEditing]);
|
||||||
|
|
||||||
|
const handleRemoveObjectMask = useCallback(
|
||||||
|
(key, subkey) => {
|
||||||
|
const newObjectMaskPoints = { ...objectMaskPoints };
|
||||||
|
delete newObjectMaskPoints[key];
|
||||||
|
setObjectMaskPoints(newObjectMaskPoints);
|
||||||
|
},
|
||||||
|
[objectMaskPoints, setObjectMaskPoints]
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleCopyObjectMasks = useCallback(async () => {
|
||||||
|
await window.navigator.clipboard.writeText(` objects:
|
||||||
|
filters:
|
||||||
|
${Object.keys(objectMaskPoints)
|
||||||
|
.map((objectName) =>
|
||||||
|
objectMaskPoints[objectName].length
|
||||||
|
? ` ${objectName}:
|
||||||
|
mask: ${polylinePointsToPolyline(objectMaskPoints[objectName])}`
|
||||||
|
: ''
|
||||||
|
)
|
||||||
|
.filter(Boolean)
|
||||||
|
.join('\n')}`);
|
||||||
|
}, [objectMaskPoints]);
|
||||||
|
|
||||||
|
const handleChangeSnap = useCallback(
|
||||||
|
(id, value) => {
|
||||||
|
setSnap(value);
|
||||||
|
},
|
||||||
|
[setSnap]
|
||||||
|
);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div class="flex-col space-y-4" style={`max-width: ${width}px`}>
|
||||||
|
<Heading size="2xl">{camera} mask & zone creator</Heading>
|
||||||
|
|
||||||
|
<Box>
|
||||||
|
<p>
|
||||||
|
This tool can help you create masks & zones for your {camera} camera. When done, copy each mask configuration
|
||||||
|
into your <code className="font-mono">config.yml</code> file and restart your Frigate instance to save your
|
||||||
|
changes.
|
||||||
|
</p>
|
||||||
|
</Box>
|
||||||
|
|
||||||
|
<Box className="space-y-4">
|
||||||
|
<div className="relative">
|
||||||
|
<img ref={imageRef} width={width} height={height} src={`${apiHost}/api/${camera}/latest.jpg`} />
|
||||||
|
<EditableMask
|
||||||
|
onChange={handleUpdateEditable}
|
||||||
|
points={editing.subkey ? editing.set[editing.key][editing.subkey] : editing.set[editing.key]}
|
||||||
|
scale={imageScale}
|
||||||
|
snap={snap}
|
||||||
|
width={width}
|
||||||
|
height={height}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<Switch checked={snap} label="Snap to edges" onChange={handleChangeSnap} />
|
||||||
|
</Box>
|
||||||
|
|
||||||
|
<div class="flex-col space-y-4">
|
||||||
|
<MaskValues
|
||||||
|
editing={editing}
|
||||||
|
title="Motion masks"
|
||||||
|
onCopy={handleCopyMotionMasks}
|
||||||
|
onCreate={handleAddMask}
|
||||||
|
onEdit={handleEditMask}
|
||||||
|
onRemove={handleRemoveMask}
|
||||||
|
points={motionMaskPoints}
|
||||||
|
yamlPrefix={'motion:\n mask:'}
|
||||||
|
yamlKeyPrefix={maskYamlKeyPrefix}
|
||||||
|
/>
|
||||||
|
|
||||||
|
<MaskValues
|
||||||
|
editing={editing}
|
||||||
|
title="Zones"
|
||||||
|
onCopy={handleCopyZones}
|
||||||
|
onCreate={handleAddZone}
|
||||||
|
onEdit={handleEditZone}
|
||||||
|
onRemove={handleRemoveZone}
|
||||||
|
points={zonePoints}
|
||||||
|
yamlPrefix="zones:"
|
||||||
|
yamlKeyPrefix={zoneYamlKeyPrefix}
|
||||||
|
/>
|
||||||
|
|
||||||
|
<MaskValues
|
||||||
|
isMulti
|
||||||
|
editing={editing}
|
||||||
|
title="Object masks"
|
||||||
|
onCopy={handleCopyObjectMasks}
|
||||||
|
onCreate={handleAddObjectMask}
|
||||||
|
onEdit={handleEditObjectMask}
|
||||||
|
onRemove={handleRemoveObjectMask}
|
||||||
|
points={objectMaskPoints}
|
||||||
|
yamlPrefix={'objects:\n filters:'}
|
||||||
|
yamlKeyPrefix={objectYamlKeyPrefix}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function maskYamlKeyPrefix(points) {
|
||||||
|
return ` - `;
|
||||||
|
}
|
||||||
|
|
||||||
|
function zoneYamlKeyPrefix(points, key) {
|
||||||
|
return ` ${key}:
|
||||||
|
coordinates: `;
|
||||||
|
}
|
||||||
|
|
||||||
|
function objectYamlKeyPrefix(points, key, subkey) {
|
||||||
|
return ` - `;
|
||||||
|
}
|
||||||
|
|
||||||
|
const MaskInset = 20;
|
||||||
|
|
||||||
|
function EditableMask({ onChange, points, scale, snap, width, height }) {
|
||||||
|
if (!points) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
const boundingRef = useRef(null);
|
||||||
|
|
||||||
|
function boundedSize(value, maxValue) {
|
||||||
|
const newValue = Math.min(Math.max(0, Math.round(value)), maxValue);
|
||||||
|
if (snap) {
|
||||||
|
if (newValue <= MaskInset) {
|
||||||
|
return 0;
|
||||||
|
} else if (maxValue - newValue <= MaskInset) {
|
||||||
|
return maxValue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return newValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const handleMovePoint = useCallback(
|
||||||
|
(index, newX, newY) => {
|
||||||
|
if (newX < 0 && newY < 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
let x = boundedSize(newX / scale, width, snap);
|
||||||
|
let y = boundedSize(newY / scale, height, snap);
|
||||||
|
|
||||||
|
const newPoints = [...points];
|
||||||
|
newPoints[index] = [x, y];
|
||||||
|
onChange(newPoints);
|
||||||
|
},
|
||||||
|
[scale, points, snap]
|
||||||
|
);
|
||||||
|
|
||||||
|
// Add a new point between the closest two other points
|
||||||
|
const handleAddPoint = useCallback(
|
||||||
|
(event) => {
|
||||||
|
const { offsetX, offsetY } = event;
|
||||||
|
const scaledX = boundedSize((offsetX - MaskInset) / scale, width, snap);
|
||||||
|
const scaledY = boundedSize((offsetY - MaskInset) / scale, height, snap);
|
||||||
|
const newPoint = [scaledX, scaledY];
|
||||||
|
|
||||||
|
let closest;
|
||||||
|
const { index } = points.reduce(
|
||||||
|
(result, point, i) => {
|
||||||
|
const nextPoint = points.length === i + 1 ? points[0] : points[i + 1];
|
||||||
|
const distance0 = Math.sqrt(Math.pow(point[0] - newPoint[0], 2) + Math.pow(point[1] - newPoint[1], 2));
|
||||||
|
const distance1 = Math.sqrt(Math.pow(point[0] - nextPoint[0], 2) + Math.pow(point[1] - nextPoint[1], 2));
|
||||||
|
const distance = distance0 + distance1;
|
||||||
|
return distance < result.distance ? { distance, index: i } : result;
|
||||||
|
},
|
||||||
|
{ distance: Infinity, index: -1 }
|
||||||
|
);
|
||||||
|
const newPoints = [...points];
|
||||||
|
newPoints.splice(index, 0, newPoint);
|
||||||
|
onChange(newPoints);
|
||||||
|
},
|
||||||
|
[scale, points, onChange, snap]
|
||||||
|
);

  const handleRemovePoint = useCallback(
    (index) => {
      const newPoints = [...points];
      newPoints.splice(index, 1);
      onChange(newPoints);
    },
    [points, onChange]
  );

  const scaledPoints = useMemo(() => scalePolylinePoints(points, scale), [points, scale]);

  return (
    <div className="absolute" style={`inset: -${MaskInset}px`}>
      {!scaledPoints
        ? null
        : scaledPoints.map(([x, y], i) => (
            <PolyPoint
              boundingRef={boundingRef}
              index={i}
              onMove={handleMovePoint}
              onRemove={handleRemovePoint}
              x={x + MaskInset}
              y={y + MaskInset}
            />
          ))}
      <div className="absolute inset-0 right-0 bottom-0" onclick={handleAddPoint} ref={boundingRef} />
      <svg width="100%" height="100%" className="absolute pointer-events-none" style={`inset: ${MaskInset}px`}>
        {!scaledPoints ? null : (
          <g>
            <polyline points={polylinePointsToPolyline(scaledPoints)} fill="rgba(244,0,0,0.5)" />
          </g>
        )}
      </svg>
    </div>
  );
}

function MaskValues({
  isMulti = false,
  editing,
  title,
  onCopy,
  onCreate,
  onEdit,
  onRemove,
  points,
  yamlPrefix,
  yamlKeyPrefix,
}) {
  const [showButtons, setShowButtons] = useState(false);

  const handleMousein = useCallback(() => {
    setShowButtons(true);
  }, [setShowButtons]);

  const handleMouseout = useCallback(
    (event) => {
      const el = event.toElement || event.relatedTarget;
      if (!el || el.parentNode === event.target) {
        return;
      }
      setShowButtons(false);
    },
    [setShowButtons]
  );

  const handleEdit = useCallback(
    (event) => {
      const { key, subkey } = event.target.dataset;
      onEdit(key, subkey);
    },
    [onEdit]
  );

  const handleRemove = useCallback(
    (event) => {
      const { key, subkey } = event.target.dataset;
      onRemove(key, subkey);
    },
    [onRemove]
  );

  return (
    <Box className="overflow-hidden" onmouseover={handleMousein} onmouseout={handleMouseout}>
      <div class="flex space-x-4">
        <Heading className="flex-grow self-center" size="base">
          {title}
        </Heading>
        <Button onClick={onCopy}>Copy</Button>
        <Button onClick={onCreate}>Add</Button>
      </div>
      <pre class="relative overflow-auto font-mono text-gray-900 dark:text-gray-100 rounded bg-gray-100 dark:bg-gray-800 p-2">
        {yamlPrefix}
        {Object.keys(points).map((mainkey) => {
          if (isMulti) {
            return (
              <div>
                {` ${mainkey}:\n mask:\n`}
                {points[mainkey].map((item, subkey) => (
                  <Item
                    mainkey={mainkey}
                    subkey={subkey}
                    editing={editing}
                    handleEdit={handleEdit}
                    points={item}
                    showButtons={showButtons}
                    handleRemove={handleRemove}
                    yamlKeyPrefix={yamlKeyPrefix}
                  />
                ))}
              </div>
            );
          } else {
            return (
              <Item
                mainkey={mainkey}
                editing={editing}
                handleEdit={handleEdit}
                points={points[mainkey]}
                showButtons={showButtons}
                handleRemove={handleRemove}
                yamlKeyPrefix={yamlKeyPrefix}
              />
            );
          }
        })}
      </pre>
    </Box>
  );
}

function Item({ mainkey, subkey, editing, handleEdit, points, showButtons, handleRemove, yamlKeyPrefix }) {
  return (
    <span
      data-key={mainkey}
      data-subkey={subkey}
      className={`block hover:text-blue-400 cursor-pointer relative ${
        editing.key === mainkey && editing.subkey === subkey ? 'text-blue-800 dark:text-blue-600' : ''
      }`}
      onClick={handleEdit}
      title="Click to edit"
    >
      {`${yamlKeyPrefix(points, mainkey, subkey)}${polylinePointsToPolyline(points)}`}
      {showButtons ? (
        <Button
          className="absolute top-0 right-0"
          color="red"
          data-key={mainkey}
          data-subkey={subkey}
          onClick={handleRemove}
        >
          Remove
        </Button>
      ) : null}
    </span>
  );
}

function getPolylinePoints(polyline) {
  if (!polyline) {
    return;
  }

  return polyline.split(',').reduce((memo, point, i) => {
    if (i % 2) {
      memo[memo.length - 1].push(parseInt(point, 10));
    } else {
      memo.push([parseInt(point, 10)]);
    }
    return memo;
  }, []);
}

function scalePolylinePoints(polylinePoints, scale) {
  if (!polylinePoints) {
    return;
  }

  return polylinePoints.map(([x, y]) => [Math.round(x * scale), Math.round(y * scale)]);
}

function polylinePointsToPolyline(polylinePoints) {
  if (!polylinePoints) {
    return;
  }
  return polylinePoints.reduce((memo, [x, y]) => `${memo}${x},${y},`, '').replace(/,$/, '');
}
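
// These three helpers round-trip between the comma-separated coordinate strings shown in
// the YAML block above and the [x, y] pairs the editor manipulates. A short usage sketch,
// with made-up values (not part of the diff):
const exampleCoords = getPolylinePoints('0,0,200,0,200,150'); // [[0, 0], [200, 0], [200, 150]]
const exampleScaled = scalePolylinePoints(exampleCoords, 0.5); // [[0, 0], [100, 0], [100, 75]]
const examplePolyline = polylinePointsToPolyline(exampleScaled); // '0,0,100,0,100,75'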

const PolyPointRadius = 10;
function PolyPoint({ boundingRef, index, x, y, onMove, onRemove }) {
  const [hidden, setHidden] = useState(false);

  const handleDragOver = useCallback(
    (event) => {
      if (
        !boundingRef.current ||
        (event.target !== boundingRef.current && !boundingRef.current.contains(event.target))
      ) {
        return;
      }
      onMove(index, event.layerX - PolyPointRadius * 2, event.layerY - PolyPointRadius * 2);
    },
    [onMove, index, boundingRef.current]
  );

  const handleDragStart = useCallback(() => {
    boundingRef.current && boundingRef.current.addEventListener('dragover', handleDragOver, false);
    setHidden(true);
  }, [setHidden, boundingRef.current, handleDragOver]);

  const handleDragEnd = useCallback(() => {
    boundingRef.current && boundingRef.current.removeEventListener('dragover', handleDragOver);
    setHidden(false);
  }, [setHidden, boundingRef.current, handleDragOver]);

  const handleRightClick = useCallback(
    (event) => {
      event.preventDefault();
      onRemove(index);
    },
    [onRemove, index]
  );

  const handleClick = useCallback((event) => {
    event.stopPropagation();
    event.preventDefault();
  }, []);

  return (
    <div
      className={`${hidden ? 'opacity-0' : ''} bg-gray-900 rounded-full absolute z-20`}
      style={`top: ${y - PolyPointRadius}px; left: ${x - PolyPointRadius}px; width: 20px; height: 20px;`}
      draggable
      onclick={handleClick}
      oncontextmenu={handleRightClick}
      ondragstart={handleDragStart}
      ondragend={handleDragEnd}
    />
  );
}

38  web/src/Cameras.jsx  Normal file
@@ -0,0 +1,38 @@
import { h } from 'preact';
import Box from './components/Box';
import Events from './Events';
import Heading from './components/Heading';
import { route } from 'preact-router';
import { useContext } from 'preact/hooks';
import { ApiHost, Config } from './context';

export default function Cameras() {
  const config = useContext(Config);

  if (!config.cameras) {
    return <p>loading…</p>;
  }

  return (
    <div className="grid lg:grid-cols-2 md:grid-cols-1 gap-4">
      {Object.keys(config.cameras).map((camera) => (
        <Camera name={camera} />
      ))}
    </div>
  );
}

function Camera({ name }) {
  const apiHost = useContext(ApiHost);
  const href = `/cameras/${name}`;

  return (
    <Box
      className="bg-white dark:bg-gray-700 shadow-lg rounded-lg p-4 hover:bg-gray-300 hover:dark:bg-gray-500 dark:hover:text-gray-900 dark:hover:text-gray-900"
      href={href}
    >
      <Heading size="base">{name}</Heading>
      <img className="w-full" src={`${apiHost}/api/${name}/latest.jpg`} />
    </Box>
  );
}
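
Each camera card links to /cameras/<name> and shows a still frame from the API's latest.jpg endpoint for that camera. With a hypothetical camera named front_door and a hypothetical API host, the card resolves to roughly:

// Illustrative only; hypothetical camera name and host.
const apiHost = 'http://frigate.local:5000';
const name = 'front_door';
const href = `/cameras/${name}`; // '/cameras/front_door'
const imgSrc = `${apiHost}/api/${name}/latest.jpg`; // 'http://frigate.local:5000/api/front_door/latest.jpg'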

97  web/src/Debug.jsx  Normal file
@@ -0,0 +1,97 @@
import { h } from 'preact';
import Heading from './components/Heading';
import Link from './components/Link';
import { ApiHost, Config } from './context';
import { Table, Tbody, Thead, Tr, Th, Td } from './components/Table';
import { useCallback, useContext, useEffect, useState } from 'preact/hooks';

export default function Debug() {
  const apiHost = useContext(ApiHost);
  const config = useContext(Config);
  const [stats, setStats] = useState({});
  const [timeoutId, setTimeoutId] = useState(null);

  const fetchStats = useCallback(async () => {
    const statsResponse = await fetch(`${apiHost}/api/stats`);
    const stats = statsResponse.ok ? await statsResponse.json() : {};
    setStats(stats);
    setTimeoutId(setTimeout(fetchStats, 1000));
  }, [setStats]);

  useEffect(() => {
    fetchStats();
  }, []);

  useEffect(() => {
    return () => {
      clearTimeout(timeoutId);
    };
  }, [timeoutId]);

  const { detectors, detection_fps, service, ...cameras } = stats;
  if (!service) {
    return 'loading…';
  }

  const detectorNames = Object.keys(detectors);
  const detectorDataKeys = Object.keys(detectors[detectorNames[0]]);

  const cameraNames = Object.keys(cameras);
  const cameraDataKeys = Object.keys(cameras[cameraNames[0]]);

  return (
    <div>
      <Heading>
        Debug <span className="text-sm">{service.version}</span>
      </Heading>
      <Table className="w-full">
        <Thead>
          <Tr>
            <Th>detector</Th>
            {detectorDataKeys.map((name) => (
              <Th>{name.replace('_', ' ')}</Th>
            ))}
          </Tr>
        </Thead>
        <Tbody>
          {detectorNames.map((detector, i) => (
            <Tr index={i}>
              <Td>{detector}</Td>
              {detectorDataKeys.map((name) => (
                <Td key={`${name}-${detector}`}>{detectors[detector][name]}</Td>
              ))}
            </Tr>
          ))}
        </Tbody>
      </Table>

      <Table className="w-full">
        <Thead>
          <Tr>
            <Th>camera</Th>
            {cameraDataKeys.map((name) => (
              <Th>{name.replace('_', ' ')}</Th>
            ))}
          </Tr>
        </Thead>
        <Tbody>
          {cameraNames.map((camera, i) => (
            <Tr index={i}>
              <Td>
                <Link href={`/cameras/${camera}`}>{camera}</Link>
              </Td>
              {cameraDataKeys.map((name) => (
                <Td key={`${name}-${camera}`}>{cameras[camera][name]}</Td>
              ))}
            </Tr>
          ))}
        </Tbody>
      </Table>

      <Heading size="sm">Config</Heading>
      <pre className="font-mono overflow-y-scroll overflow-x-scroll max-h-96 rounded bg-white dark:bg-gray-900">
        {JSON.stringify(config, null, 2)}
      </pre>
    </div>
  );
}
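
fetchStats reschedules itself with setTimeout after each response, and the second effect clears the pending timer when the component unmounts, so polling of /api/stats stops once the page is left. The same poll-then-reschedule shape in isolation, as a rough sketch (hypothetical host, one-second interval as above):

// Illustrative sketch only; mirrors the fetchStats pattern above.
let pollTimeoutId = null;
async function pollStats() {
  const response = await fetch('http://frigate.local:5000/api/stats'); // hypothetical host
  const stats = response.ok ? await response.json() : {};
  console.log(stats);
  pollTimeoutId = setTimeout(pollStats, 1000); // next request only after this one finishes
}
pollStats();
// on teardown: clearTimeout(pollTimeoutId);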

90  web/src/Event.jsx  Normal file
@@ -0,0 +1,90 @@
import { h, Fragment } from 'preact';
import { ApiHost } from './context';
import Box from './components/Box';
import Heading from './components/Heading';
import Link from './components/Link';
import { Table, Thead, Tbody, Tfoot, Th, Tr, Td } from './components/Table';
import { useContext, useEffect, useState } from 'preact/hooks';

export default function Event({ eventId }) {
  const apiHost = useContext(ApiHost);
  const [data, setData] = useState(null);

  useEffect(async () => {
    const response = await fetch(`${apiHost}/api/events/${eventId}`);
    const data = response.ok ? await response.json() : null;
    setData(data);
  }, [apiHost, eventId]);

  if (!data) {
    return (
      <div>
        <Heading>{eventId}</Heading>
        <p>loading…</p>
      </div>
    );
  }

  const startime = new Date(data.start_time * 1000);
  const endtime = new Date(data.end_time * 1000);

  return (
    <div className="space-y-4">
      <Heading>
        {data.camera} {data.label} <span className="text-sm">{startime.toLocaleString()}</span>
      </Heading>

      <Box>
        {data.has_clip ? (
          <Fragment>
            <Heading size="sm">Clip</Heading>
            <video className="w-100" src={`${apiHost}/clips/${data.camera}-${eventId}.mp4`} controls />
          </Fragment>
        ) : (
          <p>No clip available</p>
        )}
      </Box>

      <Box>
        <Heading size="sm">{data.has_snapshot ? 'Best image' : 'Thumbnail'}</Heading>
        <img
          src={
            data.has_snapshot
              ? `${apiHost}/clips/${data.camera}-${eventId}.jpg`
              : `data:image/jpeg;base64,${data.thumbnail}`
          }
          alt={`${data.label} at ${(data.top_score * 100).toFixed(1)}% confidence`}
        />
      </Box>

      <Table>
        <Thead>
          <Th>Key</Th>
          <Th>Value</Th>
        </Thead>
        <Tbody>
          <Tr>
            <Td>Camera</Td>
            <Td>
              <Link href={`/cameras/${data.camera}`}>{data.camera}</Link>
            </Td>
          </Tr>
          <Tr index={1}>
            <Td>Timeframe</Td>
            <Td>
              {startime.toLocaleString()} – {endtime.toLocaleString()}
            </Td>
          </Tr>
          <Tr>
            <Td>Score</Td>
            <Td>{(data.top_score * 100).toFixed(2)}%</Td>
          </Tr>
          <Tr index={1}>
            <Td>Zones</Td>
            <Td>{data.zones.join(', ')}</Td>
          </Tr>
        </Tbody>
      </Table>
    </div>
  );
}
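
Note that the effect passes an async function directly to useEffect; the promise it returns is not a cleanup function, so no cleanup runs, which is harmless here but worth knowing. The more conventional shape wraps the async work, roughly like this sketch (not part of the diff):

// Illustrative sketch only; conventional wrapper around async work in an effect.
useEffect(() => {
  async function loadEvent() {
    const response = await fetch(`${apiHost}/api/events/${eventId}`);
    setData(response.ok ? await response.json() : null);
  }
  loadEvent();
}, [apiHost, eventId]);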

120  web/src/Events.jsx  Normal file
@@ -0,0 +1,120 @@
import { h } from 'preact';
import { ApiHost } from './context';
import Box from './components/Box';
import Heading from './components/Heading';
import Link from './components/Link';
import { route } from 'preact-router';
import { Table, Thead, Tbody, Tfoot, Th, Tr, Td } from './components/Table';
import { useCallback, useContext, useEffect, useState } from 'preact/hooks';

export default function Events({ url } = {}) {
  const apiHost = useContext(ApiHost);
  const [events, setEvents] = useState([]);

  const searchParams = new URL(`${window.location.protocol}//${window.location.host}${url || '/events'}`).searchParams;
  const searchParamsString = searchParams.toString();

  useEffect(async () => {
    const response = await fetch(`${apiHost}/api/events?${searchParamsString}`);
    const data = response.ok ? await response.json() : {};
    setEvents(data);
  }, [searchParamsString]);

  const searchKeys = Array.from(searchParams.keys());

  return (
    <div className="space-y-4">
      <Heading>Events</Heading>

      {searchKeys.length ? (
        <Box>
          <Heading size="sm">Filters</Heading>
          <div className="flex flex-wrap space-x-2">
            {searchKeys.map((filterKey) => (
              <UnFilterable
                paramName={filterKey}
                searchParams={searchParamsString}
                name={`${filterKey}: ${searchParams.get(filterKey)}`}
              />
            ))}
          </div>
        </Box>
      ) : null}

      <Box className="min-w-0 overflow-auto">
        <Table>
          <Thead>
            <Tr>
              <Th></Th>
              <Th>Camera</Th>
              <Th>Label</Th>
              <Th>Score</Th>
              <Th>Zones</Th>
              <Th>Date</Th>
              <Th>Start</Th>
              <Th>End</Th>
            </Tr>
          </Thead>
          <Tbody>
            {events.map(
              (
                { camera, id, label, start_time: startTime, end_time: endTime, thumbnail, top_score: score, zones },
                i
              ) => {
                const start = new Date(parseInt(startTime * 1000, 10));
                const end = new Date(parseInt(endTime * 1000, 10));
                return (
                  <Tr key={id} index={i}>
                    <Td>
                      <a href={`/events/${id}`}>
                        <img className="w-32 max-w-none" src={`data:image/jpeg;base64,${thumbnail}`} />
                      </a>
                    </Td>
                    <Td>
                      <Filterable searchParams={searchParamsString} paramName="camera" name={camera} />
                    </Td>
                    <Td>
                      <Filterable searchParams={searchParamsString} paramName="label" name={label} />
                    </Td>
                    <Td>{(score * 100).toFixed(2)}%</Td>
                    <Td>
                      <ul>
                        {zones.map((zone) => (
                          <li>
                            <Filterable searchParams={searchParamsString} paramName="zone" name={zone} />
                          </li>
                        ))}
                      </ul>
                    </Td>
                    <Td>{start.toLocaleDateString()}</Td>
                    <Td>{start.toLocaleTimeString()}</Td>
                    <Td>{end.toLocaleTimeString()}</Td>
                  </Tr>
                );
              }
            )}
          </Tbody>
        </Table>
      </Box>
    </div>
  );
}

function Filterable({ searchParams, paramName, name }) {
  const params = new URLSearchParams(searchParams);
  params.set(paramName, name);
  return <Link href={`?${params.toString()}`}>{name}</Link>;
}

function UnFilterable({ searchParams, paramName, name }) {
  const params = new URLSearchParams(searchParams);
  params.delete(paramName);
  return (
    <a
      className="bg-gray-700 text-white px-3 py-1 rounded-md hover:bg-gray-300 hover:text-gray-900 dark:bg-gray-300 dark:text-gray-900 dark:hover:bg-gray-700 dark:hover:text-white"
      href={`?${params.toString()}`}
    >
      {name}
    </a>
  );
}
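
Filterable and UnFilterable build their links by copying the current query string and setting or deleting a single parameter, so each link toggles exactly one filter. A quick sketch of the hrefs they generate, using an example query string:

// Illustrative only; example query string, not part of the diff.
const exampleQuery = 'camera=front_door&label=person';
const addZone = new URLSearchParams(exampleQuery);
addZone.set('zone', 'porch'); // Filterable href: '?camera=front_door&label=person&zone=porch'
const dropLabel = new URLSearchParams(exampleQuery);
dropLabel.delete('label'); // UnFilterable href: '?camera=front_door'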

87  web/src/Sidebar.jsx  Normal file
@@ -0,0 +1,87 @@
import { h } from 'preact';
import Link from './components/Link';
import { Link as RouterLink } from 'preact-router/match';
import { useCallback, useState } from 'preact/hooks';

function HamburgerIcon() {
  return (
    <svg fill="currentColor" viewBox="0 0 20 20" className="w-6 h-6">
      <path
        fill-rule="evenodd"
        d="M3 5a1 1 0 011-1h12a1 1 0 110 2H4a1 1 0 01-1-1zM3 10a1 1 0 011-1h12a1 1 0 110 2H4a1 1 0 01-1-1zM9 15a1 1 0 011-1h6a1 1 0 110 2h-6a1 1 0 01-1-1z"
        clip-rule="evenodd"
      ></path>
    </svg>
  );
}

function CloseIcon() {
  return (
    <svg fill="currentColor" viewBox="0 0 20 20" className="w-6 h-6">
      <path
        fill-rule="evenodd"
        d="M4.293 4.293a1 1 0 011.414 0L10 8.586l4.293-4.293a1 1 0 111.414 1.414L11.414 10l4.293 4.293a1 1 0 01-1.414 1.414L10 11.414l-4.293 4.293a1 1 0 01-1.414-1.414L8.586 10 4.293 5.707a1 1 0 010-1.414z"
        clip-rule="evenodd"
      ></path>
    </svg>
  );
}

function NavLink({ className = '', href, text }) {
  const external = href.startsWith('http');
  const El = external ? Link : RouterLink;
  const props = external ? { rel: 'noopener nofollow', target: '_blank' } : {};
  return (
    <El
      activeClassName="bg-gray-200 dark:bg-gray-700 dark:hover:bg-gray-600 dark:focus:bg-gray-600 dark:focus:text-white dark:hover:text-white dark:text-gray-200"
      className={`block px-4 py-2 mt-2 text-sm font-semibold text-gray-900 bg-transparent rounded-lg dark:bg-transparent dark:hover:bg-gray-600 dark:focus:bg-gray-600 dark:focus:text-white dark:hover:text-white dark:text-gray-200 hover:text-gray-900 focus:text-gray-900 hover:bg-gray-200 focus:bg-gray-200 focus:outline-none focus:shadow-outline self-end ${className}`}
      href={href}
      {...props}
    >
      {text}
    </El>
  );
}

export default function Sidebar() {
  const [open, setOpen] = useState(false);

  const handleToggle = useCallback(() => {
    setOpen(!open);
  }, [open, setOpen]);

  return (
    <div className="flex flex-col w-full md:w-64 text-gray-700 bg-white dark:text-gray-200 dark:bg-gray-700 flex-shrink-0">
      <div className="flex-shrink-0 px-8 py-4 flex flex-row items-center justify-between">
        <a
          href="#"
          className="text-lg font-semibold tracking-widest text-gray-900 uppercase rounded-lg dark:text-white focus:outline-none focus:shadow-outline"
        >
          Frigate
        </a>
        <button
          className="rounded-lg md:hidden rounded-lg focus:outline-none focus:shadow-outline"
          onClick={handleToggle}
        >
          {open ? <CloseIcon /> : <HamburgerIcon />}
        </button>
      </div>
      <nav
        className={`flex-col flex-grow md:block overflow-hidden px-4 pb-4 md:pb-0 md:overflow-y-auto ${
          !open ? 'md:h-0 hidden' : ''
        }`}
      >
        <NavLink href="/" text="Cameras" />
        <NavLink href="/events" text="Events" />
        <NavLink href="/debug" text="Debug" />
        <hr className="border-solid border-gray-500 mt-2" />
        <NavLink
          className="self-end"
          href="https://github.com/blakeblackshear/frigate/blob/master/README.md"
          text="Documentation"
        />
        <NavLink className="self-end" href="https://github.com/blakeblackshear/frigate" text="GitHub" />
      </nav>
    </div>
  );
}

16  web/src/components/Box.jsx  Normal file
@@ -0,0 +1,16 @@
import { h } from 'preact';

export default function Box({ children, className = '', hover = false, href, ...props }) {
  const Element = href ? 'a' : 'div';
  return (
    <Element
      className={`bg-white dark:bg-gray-700 shadow-lg rounded-lg p-4 ${
        hover ? 'hover:bg-gray-300 hover:dark:bg-gray-500 dark:hover:text-gray-900 dark:hover:text-gray-900' : ''
      } ${className}`}
      href={href}
      {...props}
    >
      {children}
    </Element>
  );
}

23  web/src/components/Button.jsx  Normal file
@@ -0,0 +1,23 @@
import { h } from 'preact';

const noop = () => {};

const BUTTON_COLORS = {
  blue: { normal: 'bg-blue-500', hover: 'hover:bg-blue-400' },
  red: { normal: 'bg-red-500', hover: 'hover:bg-red-400' },
  green: { normal: 'bg-green-500', hover: 'hover:bg-green-400' },
};

export default function Button({ children, className, color = 'blue', onClick, size, ...attrs }) {
  return (
    <div
      role="button"
      tabindex="0"
      className={`rounded ${BUTTON_COLORS[color].normal} text-white pl-4 pr-4 pt-2 pb-2 font-bold shadow ${BUTTON_COLORS[color].hover} hover:shadow-lg cursor-pointer ${className}`}
      onClick={onClick || noop}
      {...attrs}
    >
      {children}
    </div>
  );
}
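
Button renders a styled div with role="button" and forwards any extra attributes, which is how the Remove button in Item above attaches its data-key and data-subkey for the click handler to read back from event.target.dataset. A minimal usage sketch with a hypothetical handler:

// Illustrative only; hypothetical handler and data attributes.
const handleRemove = (event) => {
  const { key, subkey } = event.target.dataset;
  console.log('remove', key, subkey);
};

<Button color="red" data-key="front_door" data-subkey={0} onClick={handleRemove}>
  Remove
</Button>;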

5  web/src/components/Heading.jsx  Normal file
@@ -0,0 +1,5 @@
import { h } from 'preact';

export default function Heading({ children, className = '', size = '2xl' }) {
  return <h1 className={`font-semibold tracking-widest uppercase text-${size} ${className}`}>{children}</h1>;
}

9  web/src/components/Link.jsx  Normal file
@@ -0,0 +1,9 @@
import { h } from 'preact';

export default function Link({ className, children, href, ...props }) {
  return (
    <a className={`text-blue-500 dark:text-blue-400 hover:underline ${className}`} href={href} {...props}>
      {children}
    </a>
  );
}

30  web/src/components/Switch.jsx  Normal file
@@ -0,0 +1,30 @@
import { h } from 'preact';
import { useCallback, useState } from 'preact/hooks';

export default function Switch({ checked, label, id, onChange }) {
  const handleChange = useCallback(
    (event) => {
      console.log(event.target.checked, !checked);
      onChange(id, !checked);
    },
    [id, onChange, checked]
  );

  return (
    <label for={id} className="flex items-center cursor-pointer">
      <div className="relative">
        <input id={id} type="checkbox" className="hidden" onChange={handleChange} checked={checked} />
        <div
          className={`transition-colors toggle__line w-12 h-6 ${
            !checked ? 'bg-gray-400' : 'bg-blue-400'
          } rounded-full shadow-inner`}
        />
        <div
          className="transition-transform absolute w-6 h-6 bg-white rounded-full shadow-md inset-y-0 left-0"
          style={checked ? 'transform: translateX(100%);' : 'transform: translateX(0%);'}
        />
      </div>
      <div className="ml-3 text-gray-700 font-medium dark:text-gray-200">{label}</div>
    </label>
  );
}
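
Switch is fully controlled: it never stores the checked state itself and only reports (id, !checked) back through onChange. A minimal usage sketch with a hypothetical parent holding the state:

// Illustrative only; hypothetical parent component.
import { useState } from 'preact/hooks';

function DetectToggle() {
  const [enabled, setEnabled] = useState(false);
  return <Switch id="detect" label="Detection" checked={enabled} onChange={(id, value) => setEnabled(value)} />;
}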

31  web/src/components/Table.jsx  Normal file
@@ -0,0 +1,31 @@
import { h } from 'preact';

export function Table({ children, className = '' }) {
  return (
    <table className={`table-auto border-collapse text-gray-900 dark:text-gray-200 ${className}`}>{children}</table>
  );
}

export function Thead({ children, className = '' }) {
  return <thead className={`${className}`}>{children}</thead>;
}

export function Tbody({ children, className = '' }) {
  return <tbody className={`${className}`}>{children}</tbody>;
}

export function Tfoot({ children, className = '' }) {
  return <tfoot className={`${className}`}>{children}</tfoot>;
}

export function Tr({ children, className = '', index }) {
  return <tr className={`${index % 2 ? 'bg-gray-200 dark:bg-gray-700' : ''} ${className}`}>{children}</tr>;
}

export function Th({ children, className = '' }) {
  return <th className={`border-b-2 border-gray-400 p-4 text-left ${className}`}>{children}</th>;
}

export function Td({ children, className = '' }) {
  return <td className={`p-4 ${className}`}>{children}</td>;
}
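
Tr uses its index prop for zebra striping: odd indexes get the gray background, which is why the tables above pass the map index (or a literal index={1} to force the shaded row). A small usage sketch:

// Illustrative only; rows striped by their map index.
<Tbody>
  {['front_door', 'back_yard'].map((camera, i) => (
    <Tr key={camera} index={i}>
      <Td>{camera}</Td>
    </Tr>
  ))}
</Tbody>;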