# customvalues.template.yaml
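# How this template is consumed is not specified in the file itself; as a Helm
# values template it would typically be copied and passed to helm, e.g.
# (release name and chart reference below are assumptions, not from this file):
#   cp customvalues.template.yaml customvalues.yaml
#   helm upgrade --install sae <chart-ref> -f customvalues.yaml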
videoSource:
  logLevel: DEBUG # Options: ERROR, WARNING, INFO, DEBUG
  configs:
    - id: stream1 # The name of the video stream. Will be used as Redis stream key and as camera_id in database output.
      uri: 'uri' # Where to retrieve the video from. Must be in a format that OpenCV VideoCapture understands. An RTSP stream makes the most sense.
      max_fps: 5 # Effectively sets a lower bound on the time between two frames. 0 means off. Integer fractions of the original frame rate make the most sense (e.g. 15, 10, 5 for a 30fps source).
      scale_width: 0 # If > 0, scale the image to the given width (preserving aspect ratio)
      jpeg_quality: 80 # JPEG quality 0 - 100 (100 is near-lossless and produces large frames); Reasonable starting points (by image height): 2160 = 80, 1080 = 90, <720 = 95
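    # Additional cameras follow the same shape. A commented-out sketch with
    # placeholder values (the RTSP URL is illustrative, not from this file):
    # - id: stream2
    #   uri: 'rtsp://user:password@camera2.example:554/stream1'
    #   max_fps: 5
    #   scale_width: 0
    #   jpeg_quality: 90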
objectDetector:
  logLevel: DEBUG # Options: ERROR, WARNING, INFO, DEBUG
  customWeights:
    enabled: false # Whether to inject custom weights from init container (into custom_weights/*)
    imageTag: weights-image-tag # Which tag of the starwitorg/sae-object-detector-weights image to use
  config:
    model:
      weights_path: custom_weights/xyz # Which weights to load. yolov8[nsmlx].pt are shipped with the object-detector. For custom weights see above.
      device: cpu # Options: cpu, cuda (uses gpu; needs proper setup; see README)
      nms_agnostic: false # Whether to use class-agnostic non-maximum suppression (NMS) (for overlapping detections)
      drop_edge_detections: false # Drop detections touching the frame borders
      inference_size: [ 640, 640 ] # What resolution the image will be downscaled to before object-detection inference (should be square and a multiple of 32)
      classes: [ 2 ] # Which object classes to detect (refer to COCO object classes)
    redis:
      stream_ids:
        - stream1 # Which video streams to detect objects on. Usually all video source stream ids.
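  # For orientation, common class ids in the COCO 80-class indexing used by
  # YOLOv8: 0 = person, 1 = bicycle, 2 = car, 3 = motorcycle, 5 = bus, 7 = truck.
  # A commented-out sketch for switching to custom weights (tag and file name
  # are placeholders, not from this file):
  # customWeights:
  #   enabled: true
  #   imageTag: v1.2.3
  # config:
  #   model:
  #     weights_path: custom_weights/my-model.pt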
objectTracker:
  logLevel: INFO # Options: ERROR, WARNING, INFO, DEBUG
  needsGpu: False # Whether the algorithm needs gpu support (needs proper setup; see README)
  streamIds: # On which video streams to track objects (the same as object-detector stream ids above)
    - stream1
  config: # Tracker-specific config (see object-tracker repo / boxmot for details)
    tracker_algorithm: OCSORT
    tracker_config:
      det_thresh: 0.2 # Minimum detection confidence to be considered for tracking
      max_age: 30 # Frames a lost track is kept alive before removal
      min_hits: 3 # Consecutive detections required before a track is confirmed
      asso_threshold: 0.3
      delta_t: 3
      asso_func: 'iou' # Association metric (intersection over union)
      inertia: 0.2
      use_byte: False
      Q_xy_scaling: 1
      Q_s_scaling: 1
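  # The tracker_config keys are algorithm-specific. A commented-out sketch for
  # boxmot's ByteTrack (parameter names and values are assumptions based on
  # boxmot, not taken from this file):
  # config:
  #   tracker_algorithm: BYTETRACK
  #   tracker_config:
  #     track_thresh: 0.45
  #     match_thresh: 0.8
  #     track_buffer: 30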
geoMapper:
  enabled: false # If the geomapper should be deployed (by default it reads from "objecttracker:*" and outputs into "geomapper:*")
  config:
    cameras: # Parameters have to be specified for each camera (it makes no sense to run this without correct-ish parameters)
      - stream_id: stream1 # This must match one of the existing camera streams
        passthrough: false # If the stream should be passed through without geo mapping (all other parameters are ignored if true)
        view_x_deg: 90 # The horizontal angle of view
        image_width_px: 1920
        image_height_px: 1080
        elevation_m: 10 # Height of the camera above ground
        tilt_deg: 45 # 0° = camera facing straight down (image plane parallel to the ground), 90° = camera pointed at the horizon (image plane perpendicular to the ground)
        pos_lat: 52 # Camera location latitude
        pos_lon: 10 # Camera location longitude
        heading_deg: 90 # Compass heading (in degrees) the camera is pointed at
        brown_distortion_k1: 0 # Distortion correction (abc-model is also available)
        brown_distortion_k2: 0
        brown_distortion_k3: 0
        mapping_area: # Must be a geojson `Polygon`. If set, only detections within that polygon will be mapped (i.e. will have coordinates set)
          type: Polygon
          coordinates: [[ # [lon,lat] list of polygon points. The last point must equal the first to close the polygon!
            [10.000, 52.000],
            [10.002, 52.000],
            [10.001, 52.002],
            [10.000, 52.000]
          ]]
        remove_unmapped_detections: false # If unmapped detections should be removed (i.e. detections filtered by mapping_area, see above)
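      # A passthrough entry (stream forwarded without geo mapping; see the
      # passthrough flag above) would only need these two keys, e.g.:
      # - stream_id: stream2
      #   passthrough: true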
geoMerger:
  enabled: false # If the geomerger should be deployed (off by default)
  config:
    merging_config:
      max_distance_m: 2 # Maximum distance in meters between detections to be merged (inferred from the key name)
      merging_window_ms: 1000 # Time window in milliseconds over which detections are merged (inferred from the key name)
      target_mps: 10 # Target output rate; presumably messages per second (not documented here)
    input_stream_ids: # Which streams to merge
      - stream1
      - stream2
    output_stream_id: merged # Stream id under which the merged output is published
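  # To persist the merged stream, its id would presumably also have to be
  # listed where downstream stages read their inputs, e.g. under
  # databaseWriter.config.redisStreamIds below:
  #   redisStreamIds:
  #     - merged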
redisWriter:
  enabled: false # If the redis-writer should be deployed (off by default)
  configs: # One config per target
    - name: writer1 # A unique name to identify the instance
      redis: # The SAE-internal Redis instance
        input_stream_prefix: objecttracker # The prefix of the input stream (e.g. objecttracker or geomapper)
      target_redis: # The Redis instance to write to
        host: redis-host
        port: 6379
        output_stream_prefix: saeoutput # The prefix of the output stream (i.e. data will be published to saeoutput:<stream_id>)
        buffer_length: 10 # How many messages to buffer before discarding
        target_stream_maxlen: 100 # How many messages to keep in the output stream (translates to the XADD maxlen option)
        tls: false
      stream_ids: # Which video streams to attach to
        - stream1
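  # To inspect what arrives at the target instance (the key name follows from
  # output_stream_prefix and stream_ids above; host is the placeholder from
  # this file):
  #   redis-cli -h redis-host XREAD COUNT 10 STREAMS saeoutput:stream1 0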
databaseWriter:
  enabled: true # If the database-writer should be deployed
  config:
    redisStreamIds:
      - stream1 # Which video streams to read (and extract detection and tracking data from)
    db:
      jdbcUrl: jdbc:postgresql://host:port/ # JDBC URL. Must be a Postgres database. Does not have to be TimescaleDB (although the latter makes the most sense)
      schema: schema
      username: username
      password: password
      hypertable: tablename # Table name. Does not have to be a hypertable.
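# A quick smoke test against the target table once data flows (host, schema,
# table, and user are the placeholders from this file; the database name is
# not given here, hence the <database> placeholder):
#   psql -h host -p 5432 -U username -d <database> \
#     -c 'SELECT count(*) FROM schema.tablename;'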