#!/usr/bin/env python3
#####################################################
## librealsense T265 to MAVLink ##
#####################################################
# This script assumes the pyrealsense2.[].so file is found in the same directory as this script
# Install required packages:
# pip3 install pyrealsense2
# pip3 install transformations
# pip3 install pymavlink
# pip3 install apscheduler
# pip3 install pyserial
# File based on work by Thien Nguyen https://github.com/thien94/vision_to_mavros
# Set the path for IDLE
import sys
sys.path.append("/usr/local/lib/")
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
# Import the libraries
import pyrealsense2 as rs
import numpy as np
import transformations as tf
import math as m
import time
import argparse
import threading
import signal
from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler
from dronekit import connect, VehicleMode
from pymavlink import mavutil
import cv2
import dt_apriltags
import mpu6050_logger
import vision_control
# A replacement for the standard print() function that flushes the output
def progress(string):
    print(string, file=sys.stdout)
    sys.stdout.flush()
#######################################
# Parameters
#######################################
# Default configurations for connection to the FCU
connection_string_default = '/dev/ttyTHS1'
connection_baudrate_default = 460800 #921600
connection_timeout_sec_default = 5
# Transformation to convert different camera orientations to NED convention. Replace camera_orientation_default for your configuration.
# 0: Forward, USB port to the right
# 1: Downfacing, USB port to the right
# 2: Forward, 45 degree tilted down
# Important note for downfacing camera: you need to tilt the vehicle's nose up a little - not flat - before you run the script, otherwise the initial yaw will be randomized, read here for more details: https://github.com/IntelRealSense/librealsense/issues/4080. Tilt the vehicle to any other sides and the yaw might not be as stable.
camera_orientation_default = 0
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
enable_msg_vision_position_estimate = True
vision_position_estimate_msg_hz_default = 30.0
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
enable_msg_vision_position_delta = False
vision_position_delta_msg_hz_default = 30.0
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
enable_msg_vision_speed_estimate = True
vision_speed_estimate_msg_hz_default = 30.0
# https://mavlink.io/en/messages/common.html#STATUSTEXT
enable_update_tracking_confidence_to_gcs = True
update_tracking_confidence_to_gcs_hz_default = 1.0
# Monitor the user's keyboard input at runtime; this only works when the script is run from a terminal
enable_user_keyboard_input = True
# Default global position for EKF home/origin
enable_auto_set_ekf_home = False
home_lat = 535608820 # Somewhere random
home_lon = -1138528910 # Somewhere random
home_alt = 600000 # Somewhere random
# TODO: Taken care of by ArduPilot, so can be removed (once the handling on AP side is confirmed stable)
# In NED frame, offset from the IMU or the center of gravity to the camera's origin point
body_offset_enabled = 0
body_offset_x = 0 # In meters (m)
body_offset_y = 0 # In meters (m)
body_offset_z = 0 # In meters (m)
# Global scale factor, position x y z will be scaled up/down by this factor
scale_factor = 1.0
# Enable using yaw from compass to align north (zero degree is facing north)
compass_enabled = 0
# pose data confidence: 0x0 - Failed / 0x1 - Low / 0x2 - Medium / 0x3 - High
pose_data_confidence_level = ('FAILED', 'Low', 'Medium', 'High')
# lock for thread synchronization
lock = threading.Lock()
mavlink_thread_should_exit = False
# default exit code is failure - a graceful termination with a
# terminate signal is possible.
exit_code = 1
#######################################
# Functions for OpenCV
#######################################
"""
In this section, we will set up the functions that will translate the camera
intrinsics and extrinsics from librealsense into parameters that can be used
with OpenCV.
The T265 uses very wide angle lenses, so the distortion is modeled using a four
parameter distortion model known as Kannala-Brandt. OpenCV supports this
distortion model in their "fisheye" module, more details can be found here:
https://docs.opencv.org/3.4/db/d58/group__calib3d__fisheye.html
"""
"""
Returns R, T transform from src to dst
"""
def get_extrinsics(src, dst):
    extrinsics = src.get_extrinsics_to(dst)
    R = np.reshape(extrinsics.rotation, [3, 3]).T
    T = np.array(extrinsics.translation)
    return (R, T)
"""
Returns a camera matrix K from librealsense intrinsics
"""
def camera_matrix(intrinsics):
    return np.array([[intrinsics.fx,             0, intrinsics.ppx],
                     [            0, intrinsics.fy, intrinsics.ppy],
                     [            0,             0,              1]])
"""
Returns the fisheye distortion from librealsense intrinsics
"""
def fisheye_distortion(intrinsics):
    return np.array(intrinsics.coeffs[:4])
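# Illustrative sketch (comments only, not executed): how these helpers feed
# OpenCV's fisheye module. The names below are hypothetical and assume a
# librealsense `intrinsics` object like the ones retrieved further down.
#
#   K = camera_matrix(intrinsics)        # 3x3 pinhole camera matrix
#   D = fisheye_distortion(intrinsics)   # 4 Kannala-Brandt coefficients
#   # Map a distorted pixel (u, v) back to normalized image coordinates:
#   pts = np.array([[[u, v]]], dtype=np.float64)
#   undistorted = cv2.fisheye.undistortPoints(pts, K, D)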
#######################################
# Functions for AprilTag detection
#######################################
tag_landing_id = 19
tag_landing_size = 0.151 # tag's border size, measured in meters
tag_image_source = "left" # for Realsense T265, we can use "left" or "right"
at_detector = dt_apriltags.Detector(searchpath=['apriltags'],
                                    families='tag36h11',
                                    nthreads=1,
                                    quad_decimate=1.0,
                                    quad_sigma=0.0,
                                    refine_edges=1,
                                    decode_sharpening=0.25,
                                    debug=0)
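# Illustrative sketch (comments only, not executed): with pose estimation
# enabled, dt_apriltags returns a list of Detection objects. Hypothetical
# usage, assuming a grayscale image and (fx, fy, cx, cy) camera parameters:
#
#   tags = at_detector.detect(gray_image, estimate_tag_pose=True,
#                             camera_params=(fx, fy, cx, cy),
#                             tag_size=tag_landing_size)
#   for tag in tags:
#       print(tag.tag_id, tag.pose_t.ravel())   # tag translation in meters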
#######################################
# Global variables
#######################################
# FCU connection variables
# Camera-related variables
pipe = None
pose_sensor = None
linear_accel_cov = 0.01
angular_vel_cov = 0.01
# Data variables
data = None
prev_data = None
H_aeroRef_aeroBody = None
V_aeroRef_aeroBody = None
heading_north_yaw = None
current_confidence_level = None
current_time_us = 0
# Incremented every time pose jumping or relocalization happens
# See here: https://github.com/IntelRealSense/librealsense/blob/master/doc/t265.md#are-there-any-t265-specific-options
# For ArduPilot, a non-zero "reset_counter" also confirms that the setup is using MAVLink 2
reset_counter = 1
#######################################
# Parsing user's inputs
#######################################
parser = argparse.ArgumentParser(description='T265 to MAVLink vision positioning and AprilTag landing')
parser.add_argument('--connect',
                    help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
                    help="Vehicle connection baudrate. If not specified, a default value will be used.")
parser.add_argument('--vision_position_estimate_msg_hz', type=float,
                    help="Update frequency for VISION_POSITION_ESTIMATE message. If not specified, a default value will be used.")
parser.add_argument('--vision_position_delta_msg_hz', type=float,
                    help="Update frequency for VISION_POSITION_DELTA message. If not specified, a default value will be used.")
parser.add_argument('--vision_speed_estimate_msg_hz', type=float,
                    help="Update frequency for VISION_SPEED_ESTIMATE message. If not specified, a default value will be used.")
parser.add_argument('--scale_calib_enable', default=False, action='store_true',
                    help="Scale calibration. Only run while NOT in flight")
parser.add_argument('--camera_orientation', type=int,
                    help="Configuration for camera orientation. Currently supported: 0 - forward, USB port to the right; 1 - downfacing, USB port to the right; 2 - forward, tilted down 45deg")
parser.add_argument('--debug_enable', type=int,
                    help="Enable debug messages on terminal")
parser.add_argument('--visualization', type=int,
                    help="Enable visualization. Ensure that a monitor is connected")
parser.add_argument('--log_file_name', type=str,
                    help="String for the log file name")
args = parser.parse_args()
connection_string = args.connect
connection_baudrate = args.baudrate
vision_position_estimate_msg_hz = args.vision_position_estimate_msg_hz
vision_position_delta_msg_hz = args.vision_position_delta_msg_hz
vision_speed_estimate_msg_hz = args.vision_speed_estimate_msg_hz
scale_calib_enable = args.scale_calib_enable
camera_orientation = args.camera_orientation
debug_enable = args.debug_enable
visualization = args.visualization
log_file_name = args.log_file_name
# Use default values if no inputs are specified
if not connection_string:
    connection_string = connection_string_default
    progress("INFO: Using default connection_string %s" % connection_string)
else:
    progress("INFO: Using connection_string %s" % connection_string)

if not connection_baudrate:
    connection_baudrate = connection_baudrate_default
    progress("INFO: Using default connection_baudrate %s" % connection_baudrate)
else:
    progress("INFO: Using connection_baudrate %s" % connection_baudrate)

if not vision_position_estimate_msg_hz:
    vision_position_estimate_msg_hz = vision_position_estimate_msg_hz_default
    progress("INFO: Using default vision_position_estimate_msg_hz %s" % vision_position_estimate_msg_hz)
else:
    progress("INFO: Using vision_position_estimate_msg_hz %s" % vision_position_estimate_msg_hz)

if not vision_position_delta_msg_hz:
    vision_position_delta_msg_hz = vision_position_delta_msg_hz_default
    progress("INFO: Using default vision_position_delta_msg_hz %s" % vision_position_delta_msg_hz)
else:
    progress("INFO: Using vision_position_delta_msg_hz %s" % vision_position_delta_msg_hz)

if not vision_speed_estimate_msg_hz:
    vision_speed_estimate_msg_hz = vision_speed_estimate_msg_hz_default
    progress("INFO: Using default vision_speed_estimate_msg_hz %s" % vision_speed_estimate_msg_hz)
else:
    progress("INFO: Using vision_speed_estimate_msg_hz %s" % vision_speed_estimate_msg_hz)

if body_offset_enabled == 1:
    progress("INFO: Using camera position offset: Enabled, x y z is %s %s %s" % (body_offset_x, body_offset_y, body_offset_z))
else:
    progress("INFO: Using camera position offset: Disabled")

if compass_enabled == 1:
    progress("INFO: Using compass: Enabled. Heading will be aligned to north.")
else:
    progress("INFO: Using compass: Disabled")

if scale_calib_enable:
    progress("\nINFO: SCALE CALIBRATION PROCESS. DO NOT RUN DURING FLIGHT.\nINFO: TYPE IN NEW SCALE IN FLOATING POINT FORMAT\n")
else:
    if scale_factor == 1.0:
        progress("INFO: Using default scale factor %s" % scale_factor)
    else:
        progress("INFO: Using scale factor %s" % scale_factor)

if not camera_orientation:
    camera_orientation = camera_orientation_default
    progress("INFO: Using default camera orientation %s" % camera_orientation)
else:
    progress("INFO: Using camera orientation %s" % camera_orientation)
if camera_orientation == 0:     # Forward, USB port to the right
    H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
    H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
elif camera_orientation == 1:   # Downfacing, USB port to the right
    H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
    H_T265body_aeroBody = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
elif camera_orientation == 2:   # Forward, tilted down 45 degrees
    H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
    H_T265body_aeroBody = (tf.euler_matrix(m.pi / 4, 0, 0)).dot(np.linalg.inv(H_aeroRef_T265Ref))
else:                           # Default is facing forward, USB port to the right
    H_aeroRef_T265Ref = np.array([[0, 0, -1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
    H_T265body_aeroBody = np.linalg.inv(H_aeroRef_T265Ref)
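# Worked example (comments only): with the forward-facing mounting (case 0),
# a point 1 m ahead of the camera in T265 coordinates (-Z is forward in the
# T265 frame) maps to 1 m along +X (forward/north) in the aeronautical frame:
#
#   p_t265 = np.array([0, 0, -1, 1])        # homogeneous point, 1 m forward
#   p_aero = H_aeroRef_T265Ref.dot(p_t265)  # -> [1, 0, 0, 1]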
if not debug_enable:
    debug_enable = 0
else:
    debug_enable = 1
    np.set_printoptions(precision=4, suppress=True)  # Format output on terminal
    progress("INFO: Debug messages enabled.")

if not log_file_name:
    log_file_name = "test_log"
    print("INFO: Logging to test_log")

if not visualization:
    visualization = 0
    print("INFO: Visualization: Disabled")
else:
    visualization = 1
    print("INFO: Visualization: Enabled. Checking if monitor is connected...")
    WINDOW_TITLE = 'Apriltag detection from T265 images'
    cv2.namedWindow(WINDOW_TITLE, cv2.WINDOW_AUTOSIZE)
    print("INFO: Monitor is connected. Press `q` to exit.")
    display_mode = "stack"
#######################################
# Functions - MAVLink
#######################################
def forwarding_func(conn, msg):
    # pymavlink decodes string fields to str, but mav.send() expects bytes,
    # so re-encode them before forwarding the message on the other link
    if msg.get_type() in ("PARAM_VALUE", "PARAM_SET", "PARAM_REQUEST_READ"):
        msg.param_id = str.encode(msg.param_id)
    elif msg.get_type() == "STATUSTEXT":
        msg.text = str.encode(msg.text)
    conn.mav.send(msg)
def reboot():
    with lock:
        conn.mav.command_long_send(
            0, 0,  # target_system, target_component
            mavutil.mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,  # command
            0,  # confirmation
            1,  # param 1: autopilot (reboot)
            0,  # param 2: onboard computer (do nothing)
            0,  # param 3: camera (do nothing)
            0,  # param 4: mount (do nothing)
            0, 0, 0)  # param 5 ~ 7 not used
def send_hb():
    with lock:
        conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,
                                mavutil.mavlink.MAV_AUTOPILOT_GENERIC,
                                0,
                                0,
                                0)
def mavlink_loop(conn, callbacks, fwd_conn):
    '''a main routine for a thread; reads data from a mavlink connection,
    calling callbacks based on message type received.
    '''
    while not mavlink_thread_should_exit:
        msg = conn.recv_match(timeout=1, blocking=True)
        if msg is None:
            continue
        try:
            for c in callbacks:
                c(msg)
            forwarding_func(fwd_conn, msg)
        except Exception as e:
            print(e)
            print("got an error with msg type: " + str(msg.get_type()))
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_estimate_message():
    global current_time_us, H_aeroRef_aeroBody, reset_counter
    with lock:
        if H_aeroRef_aeroBody is not None:
            # Set up angle data
            rpy_rad = np.array(tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz'))

            # Set up covariance data, which is the upper right triangle of the covariance matrix, see here: https://files.gitter.im/ArduPilot/VisionProjects/1DpU/image.png
            # Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
            cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
            cov_twist = angular_vel_cov * pow(10, 1 - int(data.tracker_confidence))
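            # Worked example (comment only): with linear_accel_cov = 0.01,
            # High confidence (3) gives cov_pose = 0.01 * 10**0 = 0.01,
            # while Failed (0) gives 0.01 * 10**3 = 10.0, so poor tracking
            # inflates the reported variance by three orders of magnitude.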
            covariance = np.array([cov_pose, 0, 0, 0, 0, 0,
                                      cov_pose, 0, 0, 0, 0,
                                         cov_pose, 0, 0, 0,
                                            cov_twist, 0, 0,
                                               cov_twist, 0,
                                                  cov_twist])

            # Send the message
            conn.mav.vision_position_estimate_send(
                current_time_us,           # us: Timestamp (UNIX time or time since system boot)
                H_aeroRef_aeroBody[0][3],  # Global X position
                H_aeroRef_aeroBody[1][3],  # Global Y position
                H_aeroRef_aeroBody[2][3],  # Global Z position
                rpy_rad[0],                # Roll angle
                rpy_rad[1],                # Pitch angle
                rpy_rad[2],                # Yaw angle
                covariance,                # Row-major representation of pose 6x6 cross-covariance matrix
                reset_counter              # Estimate reset counter. Increment every time pose estimate jumps.
            )
# https://mavlink.io/en/messages/ardupilotmega.html#VISION_POSITION_DELTA
def send_vision_position_delta_message():
    global current_time_us, current_confidence_level, H_aeroRef_aeroBody
    with lock:
        if H_aeroRef_aeroBody is not None:
            # Calculate the deltas in position, attitude and time from the previous to the current pose
            H_aeroRef_PrevAeroBody = send_vision_position_delta_message.H_aeroRef_PrevAeroBody
            H_PrevAeroBody_CurrAeroBody = (np.linalg.inv(H_aeroRef_PrevAeroBody)).dot(H_aeroRef_aeroBody)

            delta_time_us = current_time_us - send_vision_position_delta_message.prev_time_us
            delta_position_m = [H_PrevAeroBody_CurrAeroBody[0][3], H_PrevAeroBody_CurrAeroBody[1][3], H_PrevAeroBody_CurrAeroBody[2][3]]
            delta_angle_rad = np.array(tf.euler_from_matrix(H_PrevAeroBody_CurrAeroBody, 'sxyz'))

            # Send the message
            conn.mav.vision_position_delta_send(
                current_time_us,          # us: Timestamp (UNIX time or time since system boot)
                delta_time_us,            # us: Time since last reported camera frame
                delta_angle_rad,          # float[3] in radians: rotation vector in body frame that rotates the vehicle from the previous to the current orientation
                delta_position_m,         # float[3] in m: change in position from previous to current frame, rotated into body frame (0=forward, 1=right, 2=down)
                current_confidence_level  # Normalized confidence value from 0 to 100
            )

            # Save static variables (stored as attributes on the function itself)
            send_vision_position_delta_message.H_aeroRef_PrevAeroBody = H_aeroRef_aeroBody
            send_vision_position_delta_message.prev_time_us = current_time_us
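# Sketch of the delta algebra above (comments only, hypothetical values): if
# the previous pose was 1 m north of the origin and the current pose is 2 m
# north, then
#
#   H_prev  = tf.translation_matrix([1, 0, 0])
#   H_curr  = tf.translation_matrix([2, 0, 0])
#   H_delta = np.linalg.inv(H_prev).dot(H_curr)  # translation column = [1, 0, 0]
#
# i.e. the body moved 1 m forward between frames, which is what
# VISION_POSITION_DELTA reports, expressed in the previous body frame.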
# https://mavlink.io/en/messages/common.html#VISION_SPEED_ESTIMATE
def send_vision_speed_estimate_message():
    global current_time_us, V_aeroRef_aeroBody, reset_counter
    with lock:
        if V_aeroRef_aeroBody is not None:
            # Attempt #1: following this formula https://github.com/IntelRealSense/realsense-ros/blob/development/realsense2_camera/src/base_realsense_node.cpp#L1406-L1411
            cov_pose = linear_accel_cov * pow(10, 3 - int(data.tracker_confidence))
            covariance = np.array([cov_pose, 0, 0,
                                   0, cov_pose, 0,
                                   0, 0, cov_pose])

            # Send the message
            conn.mav.vision_speed_estimate_send(
                current_time_us,           # us: Timestamp (UNIX time or time since system boot)
                V_aeroRef_aeroBody[0][3],  # Global X speed
                V_aeroRef_aeroBody[1][3],  # Global Y speed
                V_aeroRef_aeroBody[2][3],  # Global Z speed
                covariance,                # covariance
                reset_counter              # Estimate reset counter. Increment every time pose estimate jumps.
            )
def goto_position_target_local_ned(forward, right, down, yaw):
    """
    Send a SET_POSITION_TARGET_LOCAL_NED command to request the vehicle fly to a
    specified location in the North, East, Down frame.
    It is important to remember that in this frame, positive altitudes are entered
    as negative "Down" values. So if down is "10", this will be 10 metres below the
    home altitude.
    """
    with lock:
        conn.mav.set_position_target_local_ned_send(
            0,     # time_boot_ms (not used)
            0, 0,  # target system, target component
            mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED,  # frame
            0b100111111000,        # type_mask (only positions enabled)
            forward, right, down,  # x, y, z positions (North, East, Down in the MAV_FRAME_BODY_NED frame)
            0, 0, 0,   # x, y, z velocity in m/s (not used)
            0, 0, 0,   # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
            yaw, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
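# Illustrative usage sketch (comment only, not executed): request a point
# 1 m ahead of and 0.5 m below the vehicle's current position, holding yaw:
#
#   goto_position_target_local_ned(forward=1.0, right=0.0, down=0.5, yaw=0.0)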
# Report changes in the tracking confidence level to the GCS and the terminal
def update_tracking_confidence_to_gcs():
    if data is not None and update_tracking_confidence_to_gcs.prev_confidence_level != data.tracker_confidence:
        confidence_status_string = 'Tracking confidence: ' + pose_data_confidence_level[data.tracker_confidence]
        send_msg_to_gcs(confidence_status_string)
        update_tracking_confidence_to_gcs.prev_confidence_level = data.tracker_confidence
# https://mavlink.io/en/messages/common.html#STATUSTEXT
def send_msg_to_gcs(text_to_be_sent):
    # MAV_SEVERITY: 0=EMERGENCY 1=ALERT 2=CRITICAL 3=ERROR 4=WARNING 5=NOTICE 6=INFO 7=DEBUG 8=ENUM_END
    text_msg = 'T265: ' + text_to_be_sent
    conn.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_INFO, text_msg.encode())
    progress("INFO: %s" % text_to_be_sent)
# Send a MAVLink SET_GPS_GLOBAL_ORIGIN message (http://mavlink.org/messages/common#SET_GPS_GLOBAL_ORIGIN), which allows us to use local position information without a GPS.
def set_default_global_origin():
    conn.mav.set_gps_global_origin_send(
        1,
        home_lat,
        home_lon,
        home_alt
    )
# Send a MAVLink SET_HOME_POSITION message (http://mavlink.org/messages/common#SET_HOME_POSITION), which allows us to use local position information without a GPS.
def set_default_home_position():
    x = 0
    y = 0
    z = 0
    q = [1, 0, 0, 0]  # w x y z
    approach_x = 0
    approach_y = 0
    approach_z = 1
    conn.mav.set_home_position_send(
        1,
        home_lat,
        home_lon,
        home_alt,
        x,
        y,
        z,
        q,
        approach_x,
        approach_y,
        approach_z
    )
# Request a timesync update from the flight controller, for future work.
# TODO: Inspect the usage of timesync_update
def update_timesync(ts=0, tc=0):
    if ts == 0:
        ts = int(round(time.time() * 1000))
    conn.mav.timesync_send(
        tc,  # tc1
        ts   # ts1
    )
# Listen to attitude data to acquire heading when compass data is enabled
def att_msg_callback(value):
    if value.get_type() == "ATTITUDE":
        global heading_north_yaw
        if heading_north_yaw is None:
            heading_north_yaw = value.yaw
            progress("INFO: Received first ATTITUDE message with heading yaw %.2f degrees" % m.degrees(heading_north_yaw))
#######################################
# Functions - T265
#######################################
def increment_reset_counter():
    global reset_counter
    if reset_counter >= 255:
        reset_counter = 1
    reset_counter += 1
# List of notification events: https://github.com/IntelRealSense/librealsense/blob/development/include/librealsense2/h/rs_types.h
# List of notification APIs: https://github.com/IntelRealSense/librealsense/blob/development/common/notifications.cpp
def realsense_notification_callback(notif):
    progress("INFO: T265 event: " + str(notif))
    if notif.get_category() is rs.notification_category.pose_relocalization:
        increment_reset_counter()
        send_msg_to_gcs('Relocalization detected')
def realsense_connect():
    global pipe, pose_sensor

    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = rs.pipeline()

    # Build config object before requesting data
    cfg = rs.config()

    # Enable the streams we are interested in
    cfg.enable_stream(rs.stream.pose)        # Positional data
    cfg.enable_stream(rs.stream.fisheye, 1)  # Image stream, left
    cfg.enable_stream(rs.stream.fisheye, 2)  # Image stream, right

    # Configure callback for relocalization event
    device = cfg.resolve(pipe).get_device()
    pose_sensor = device.first_pose_sensor()
    pose_sensor.set_notifications_callback(realsense_notification_callback)

    # Start streaming with requested config
    pipe.start(cfg)
#######################################
# Functions - Miscellaneous
#######################################
# Monitor user input from the terminal and perform action accordingly
def user_input_monitor():
    global scale_factor, main_loop_should_quit
    while True:
        # Special case: updating scale
        if scale_calib_enable:
            scale_factor = float(input("INFO: Type in new scale as float number\n"))
            progress("INFO: New scale is %s" % scale_factor)

        if enable_auto_set_ekf_home:
            send_msg_to_gcs('Set EKF home with default GPS location')
            set_default_global_origin()
            set_default_home_position()
            time.sleep(1)  # Wait a short while for the FCU to start working

        # Add new actions here according to the key pressed.
        # Enter: set EKF home when the user presses Enter
        try:
            c = input()
            if c == "":
                send_msg_to_gcs('Set EKF home with default GPS location')
                set_default_global_origin()
                set_default_home_position()
            elif c == "r":
                reboot()
            elif c == "q":
                main_loop_should_quit = True
            else:
                progress("Got keyboard input %s" % c)
        except IOError:
            pass
#######################################
# Main code starts here
#######################################
vc = vision_control.VisionControl(goto_position_target_local_ned, log_file_name + "_vision_controller")

try:
    progress("INFO: pyrealsense2 version: %s" % str(rs.__version__))
except Exception:
    # fail silently
    pass

progress("INFO: Starting Vehicle communications")
conn = mavutil.mavlink_connection(
    connection_string,
    autoreconnect=True,
    source_system=1,
    source_component=93,
    baud=connection_baudrate,
    force_connected=True,
)
#udp_conn = mavutil.mavlink_connection('udpout:192.168.1.73:15667', source_system=1, source_component=1)
#udp_conn = mavutil.mavlink_connection('udpout:10.42.0.1:15667', source_system=1, source_component=1)
udp_conn = mavutil.mavlink_connection('udpout:192.168.166.94:15667', source_system=1, source_component=1)
mavlink_callbacks = [att_msg_callback, vc.update_mavlink_msg]
rover_thread = threading.Thread(target=mavlink_loop, args=(conn, mavlink_callbacks, udp_conn))
rover_thread.start()
gcs_thread = threading.Thread(target=mavlink_loop, args=(udp_conn, [], conn))
gcs_thread.start()
# Connecting to and configuring the camera is a little hit-and-miss. Start a
# timer and rely on a restart of the script to get it working. Configuring
# the camera appears to block all threads, so we can't handle the timeout
# internally.
# send_msg_to_gcs('Setting timer...')
signal.setitimer(signal.ITIMER_REAL, 5) # seconds...
send_msg_to_gcs('Connecting to camera...')
realsense_connect()
send_msg_to_gcs('Camera connected.')
signal.setitimer(signal.ITIMER_REAL, 0) # cancel alarm
# Send MAVLink messages in the background at pre-determined frequencies
sched = BackgroundScheduler()

if enable_msg_vision_position_estimate:
    sched.add_job(send_vision_position_estimate_message, 'interval', seconds=1/vision_position_estimate_msg_hz)

if enable_msg_vision_position_delta:
    sched.add_job(send_vision_position_delta_message, 'interval', seconds=1/vision_position_delta_msg_hz)
    send_vision_position_delta_message.H_aeroRef_PrevAeroBody = tf.quaternion_matrix([1, 0, 0, 0])
    send_vision_position_delta_message.prev_time_us = int(round(time.time() * 1000000))

if enable_msg_vision_speed_estimate:
    sched.add_job(send_vision_speed_estimate_message, 'interval', seconds=1/vision_speed_estimate_msg_hz)

if enable_update_tracking_confidence_to_gcs:
    sched.add_job(update_tracking_confidence_to_gcs, 'interval', seconds=1/update_tracking_confidence_to_gcs_hz_default)
    update_tracking_confidence_to_gcs.prev_confidence_level = -1

# A separate thread to monitor user input
if enable_user_keyboard_input:
    user_keyboard_input_thread = threading.Thread(target=user_input_monitor)
    user_keyboard_input_thread.daemon = True
    user_keyboard_input_thread.start()
    progress("INFO: Press Enter to set EKF home at default location")

sched.add_job(send_hb, 'interval', seconds=1/10.0)
sched.start()
# Gracefully terminate the script if an interrupt signal (e.g. ctrl-c)
# is received. This is considered to be abnormal termination.
main_loop_should_quit = False
def sigint_handler(sig, frame):
    global main_loop_should_quit
    main_loop_should_quit = True
signal.signal(signal.SIGINT, sigint_handler)

# Gracefully terminate the script if a terminate signal is received
# (e.g. kill -TERM).
def sigterm_handler(sig, frame):
    global main_loop_should_quit
    main_loop_should_quit = True
    global exit_code
    exit_code = 0
signal.signal(signal.SIGTERM, sigterm_handler)
if compass_enabled == 1:
    time.sleep(1)  # Wait a short while for yaw to be correctly initiated

send_msg_to_gcs('Sending vision messages to FCU')

accel_logger = mpu6050_logger.mpu6050_logger(log_file_name + "_acceleration")
video_writer = cv2.VideoWriter(log_file_name + ".avi", cv2.VideoWriter_fourcc(*'MJPG'), 30, (412, 300), 0)
try:
    # Configure the OpenCV stereo algorithm. See
    # https://docs.opencv.org/3.4/d2/d85/classcv_1_1StereoSGBM.html for a
    # description of the parameters
    window_size = 5
    min_disp = 16
    # must be divisible by 16
    num_disp = 112 - min_disp
    max_disp = min_disp + num_disp
    stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                   numDisparities=num_disp,
                                   blockSize=16,
                                   P1=8*3*window_size**2,
                                   P2=32*3*window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=100,
                                   speckleRange=32)

    # Retrieve the stream and intrinsic properties for both cameras
    profiles = pipe.get_active_profile()
    streams = {"left": profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
               "right": profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}
    intrinsics = {"left": streams["left"].get_intrinsics(),
                  "right": streams["right"].get_intrinsics()}

    # Print information about both cameras
    print("INFO: Using stereo fisheye cameras")
    if debug_enable == 1:
        print("INFO: T265 Left camera:", intrinsics["left"])
        print("INFO: T265 Right camera:", intrinsics["right"])

    # Translate the intrinsics from librealsense into OpenCV
    K_left = camera_matrix(intrinsics["left"])
    D_left = fisheye_distortion(intrinsics["left"])
    K_right = camera_matrix(intrinsics["right"])
    D_right = fisheye_distortion(intrinsics["right"])
    (width, height) = (intrinsics["left"].width, intrinsics["left"].height)

    # Get the relative extrinsics between the left and right camera
    (R, T) = get_extrinsics(streams["left"], streams["right"])
    # We need to determine what focal length our undistorted images should have
    # in order to set up the camera matrices for initUndistortRectifyMap. We
    # could use stereoRectify, but here we show how to derive these projection
    # matrices from the calibration and a desired height and field of view.
    # We calculate the undistorted focal length:
    #
    #         h
    # -----------------
    #  \      |      /
    #    \    | f  /
    #     \   |   /
    #      \ fov /
    #        \|/
    stereo_fov_rad = 90 * (m.pi/180)  # desired FOV in radians; 90 degrees seems to work OK
    stereo_height_px = 300            # 300x300 pixel stereo output
    stereo_focal_px = stereo_height_px/2 / m.tan(stereo_fov_rad/2)
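    # Worked example (comment only): with a 90 degree FOV and a 300 px tall
    # output, stereo_focal_px = 150 / tan(45 deg) = 150.0 px, i.e. the focal
    # length equals half the image height exactly when the FOV is 90 degrees.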
    # We set the left rotation to identity and the right rotation to
    # the rotation between the cameras
    R_left = np.eye(3)
    R_right = R

    # The stereo algorithm needs max_disp extra pixels in order to produce valid
    # disparity on the desired output region. This changes the width, but the
    # center of projection should be on the center of the cropped image
    stereo_width_px = stereo_height_px + max_disp
    stereo_size = (stereo_width_px, stereo_height_px)
    stereo_cx = (stereo_height_px - 1)/2 + max_disp
    stereo_cy = (stereo_height_px - 1)/2

    # Construct the left and right projection matrices; the only difference is
    # that the right projection matrix should have a shift along the x axis of
    # baseline * focal_length
    P_left = np.array([[stereo_focal_px, 0, stereo_cx, 0],
                       [0, stereo_focal_px, stereo_cy, 0],
                       [0, 0, 1, 0]])
    P_right = P_left.copy()
    P_right[0][3] = T[0] * stereo_focal_px

    # Construct Q for use with cv2.reprojectImageTo3D. Subtract max_disp from x
    # since we will crop the disparity later
    Q = np.array([[1, 0, 0, -(stereo_cx - max_disp)],
                  [0, 1, 0, -stereo_cy],
                  [0, 0, 0, stereo_focal_px],
                  [0, 0, -1/T[0], 0]])
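    # Illustrative sketch (comments only, not executed): Q is meant for
    # cv2.reprojectImageTo3D, which maps a cropped disparity image to 3D
    # points in the rectified left camera frame. Hypothetical usage, assuming
    # both undistorted images were available:
    #
    #   disp = stereo.compute(center_undistorted["left"],
    #                         center_undistorted["right"]).astype(np.float32) / 16.0
    #   points_3d = cv2.reprojectImageTo3D(disp[:, max_disp:], Q)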
    # Create an undistortion map for the left and right camera which applies the
    # rectification and undoes the camera distortion. This only has to be done
    # once
    m1type = cv2.CV_32FC1
    (lm1, lm2) = cv2.fisheye.initUndistortRectifyMap(K_left, D_left, R_left, P_left, stereo_size, m1type)
    (rm1, rm2) = cv2.fisheye.initUndistortRectifyMap(K_right, D_right, R_right, P_right, stereo_size, m1type)
    undistort_rectify = {"left": (lm1, lm2),
                         "right": (rm1, rm2)}

    # For AprilTag detection
    camera_params = [stereo_focal_px, stereo_focal_px, stereo_cx, stereo_cy]

    loop_time = time.time()
    vc.start()
    while not main_loop_should_quit:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()

        # Fetch pose frame
        pose = frames.get_pose_frame()

        # Process data
        if pose:
            with lock:
                # print(str(time.time()-loop_time) + "s since last loop")
                loop_time = time.time()

                # Store the timestamp for MAVLink messages
                current_time_us = int(round(time.time() * 1000000))

                # Pose data consists of translation and rotation
                data = pose.get_pose_data()

                # Confidence level value from T265: 0-3, remapped to 0 - 100: 0% - Failed / 33.3% - Low / 66.6% - Medium / 100% - High
                current_confidence_level = float(data.tracker_confidence * 100 / 3)

                # In transformations, quaternions w+ix+jy+kz are represented as [w, x, y, z]!
                H_T265Ref_T265body = tf.quaternion_matrix([data.rotation.w, data.rotation.x, data.rotation.y, data.rotation.z])
                H_T265Ref_T265body[0][3] = data.translation.x * scale_factor
                H_T265Ref_T265body[1][3] = data.translation.y * scale_factor
                H_T265Ref_T265body[2][3] = data.translation.z * scale_factor

                # Transform to aeronautic coordinates (body AND reference frame!)
                H_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(H_T265Ref_T265body.dot(H_T265body_aeroBody))

                # Calculate GLOBAL XYZ speed (speed from T265 is already GLOBAL)
                V_aeroRef_aeroBody = tf.quaternion_matrix([1, 0, 0, 0])
                V_aeroRef_aeroBody[0][3] = data.velocity.x
                V_aeroRef_aeroBody[1][3] = data.velocity.y
                V_aeroRef_aeroBody[2][3] = data.velocity.z
                V_aeroRef_aeroBody = H_aeroRef_T265Ref.dot(V_aeroRef_aeroBody)

                # Check for pose jump and increment reset_counter
                if prev_data is not None:
                    delta_translation = [data.translation.x - prev_data.translation.x, data.translation.y - prev_data.translation.y, data.translation.z - prev_data.translation.z]
                    delta_velocity = [data.velocity.x - prev_data.velocity.x, data.velocity.y - prev_data.velocity.y, data.velocity.z - prev_data.velocity.z]
                    position_displacement = np.linalg.norm(delta_translation)
                    speed_delta = np.linalg.norm(delta_velocity)

                    # A pose jump is indicated when the position changes abruptly. The behavior is not well documented yet (as of librealsense 2.34.0)
                    jump_threshold = 0.1         # in meters, from trial and error; should be scaled to how frequently the position data is obtained (200 Hz for the T265)
                    jump_speed_threshold = 20.0  # in m/s, from trial and error; should be scaled to how frequently the velocity data is obtained (200 Hz for the T265)
                    if (position_displacement > jump_threshold) or (speed_delta > jump_speed_threshold):
                        send_msg_to_gcs('VISO jump detected')
                        if position_displacement > jump_threshold:
                            progress("Position jumped by: %s" % position_displacement)
                        elif speed_delta > jump_speed_threshold:
                            progress("Speed jumped by: %s" % speed_delta)
                        increment_reset_counter()

                prev_data = data

                # Take offsets from the body's center of gravity (or IMU) to the camera's origin into account
                if body_offset_enabled == 1:
                    H_body_camera = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_body_camera[0][3] = body_offset_x
                    H_body_camera[1][3] = body_offset_y
                    H_body_camera[2][3] = body_offset_z
                    H_camera_body = np.linalg.inv(H_body_camera)
                    H_aeroRef_aeroBody = H_body_camera.dot(H_aeroRef_aeroBody.dot(H_camera_body))

                # Realign heading to face north using initial compass data
                if compass_enabled == 1:
                    H_aeroRef_aeroBody = H_aeroRef_aeroBody.dot(tf.euler_matrix(0, 0, heading_north_yaw, 'sxyz'))

                # Show debug messages here
                if debug_enable == 1:
                    os.system('clear')  # This helps make the messages more readable
                    progress("DEBUG: Raw RPY[deg]: {}".format(np.array(tf.euler_from_matrix(H_T265Ref_T265body, 'sxyz')) * 180 / m.pi))
                    progress("DEBUG: NED RPY[deg]: {}".format(np.array(tf.euler_from_matrix(H_aeroRef_aeroBody, 'sxyz')) * 180 / m.pi))
                    progress("DEBUG: Raw pos xyz : {}".format(np.array([data.translation.x, data.translation.y, data.translation.z])))
                    progress("DEBUG: NED pos xyz : {}".format(np.array(tf.translation_from_matrix(H_aeroRef_aeroBody))))
        # Fetch raw fisheye image frames
        f1 = frames.get_fisheye_frame(1).as_video_frame()
        left_data = np.asanyarray(f1.get_data())

        # Process image streams
        frame_copy = {"left": left_data}

        # Undistort and crop the center of the frames
        center_undistorted = {"left": cv2.remap(src=frame_copy["left"],
                                                map1=undistort_rectify["left"][0],
                                                map2=undistort_rectify["left"][1],
                                                interpolation=cv2.INTER_LINEAR)}

        # Run the AprilTag detection algorithm on the rectified image.
        # Params:
        #   tag_image_source: "left" or "right"
        #   tag_landing_size: actual size of the tag
        tags = at_detector.detect(center_undistorted[tag_image_source], True, camera_params, tag_landing_size)

        if tags:
            for tag in tags:
                # Check for the tag that we want to land on
                if tag.tag_id == tag_landing_id:
                    is_landing_tag_detected = True
                    H_camera_tag = tf.euler_matrix(0, 0, 0, 'sxyz')
                    H_camera_tag[0][3] = tag.pose_t[2]
                    H_camera_tag[1][3] = tag.pose_t[0]
                    H_camera_tag[2][3] = tag.pose_t[1]
                    # Copy the current pose and zero its translation so that only
                    # the rotation is applied to the tag position (copying also
                    # avoids mutating the shared H_aeroRef_aeroBody in place)
                    temp_arr = H_aeroRef_aeroBody.copy()
                    temp_arr[0][3] = 0
                    temp_arr[1][3] = 0
                    temp_arr[2][3] = 0
                    rotated_pose = temp_arr.dot(H_camera_tag)
                    vc.update_target_position(rotated_pose[0][3], rotated_pose[1][3], rotated_pose[2][3])
                    # vc.update_target_position(tag.pose_t[2], tag.pose_t[0], tag.pose_t[1])
                    # rotated_pose = H_camera_tag.dot(temp_arr)
                    # rotated_pose = np.linalg.inv(temp_arr).dot(H_camera_tag)  # does nothing
                    # print(H_camera_tag)
                    # print(H_aeroRef_aeroBody)
                    # print(rotated_pose)
                    # print("INFO: Detected landing tag", str(tag.tag_id), " relative to camera at x:", tag.pose_t[2], " ", rotated_pose[0][3], ", y:", tag.pose_t[0], " ", rotated_pose[1][3], ", z:", tag.pose_t[1], " ", rotated_pose[2][3])
        else:
            # print("INFO: No tag detected")
            is_landing_tag_detected = False
        # Annotate the tag-detection image; writing to the video file always
        # happens, while showing a pop-up window requires a monitor to be connected
        # Create the display image from the source
        tags_img = center_undistorted[tag_image_source]

        # For each detected tag, draw a bounding box and put the id of the tag in the center
        for tag in tags:
            # Set up the bounding box
            for idx in range(len(tag.corners)):
                cv2.line(tags_img,
                         tuple(tag.corners[idx-1, :].astype(int)),
                         tuple(tag.corners[idx, :].astype(int)),
                         thickness=2,
                         color=(255, 0, 0))

            # The text to be put in the image; here we simply put the id of the detected tag
            text = str(tag.tag_id)

            # Get the boundary of this text
            textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0]

            # Put the text in the middle of the tag
            cv2.putText(tags_img,
                        text,
                        org=(((tag.corners[0, 0] + tag.corners[2, 0] - textsize[0])/2).astype(int),
                             ((tag.corners[0, 1] + tag.corners[2, 1] + textsize[1])/2).astype(int)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.5,
                        thickness=2,
                        color=(255, 0, 0))

        # Write the annotated image to the log video and, if enabled, display it
        video_writer.write(tags_img)
        if visualization == 1:
            cv2.imshow(WINDOW_TITLE, tags_img)

            # Read keyboard input on the image window
            key = cv2.waitKey(1)
            if key == ord('q') or cv2.getWindowProperty(WINDOW_TITLE, cv2.WND_PROP_VISIBLE) < 1:
                break
except Exception as e:
    progress(e)
except:
    send_msg_to_gcs('ERROR IN SCRIPT')
    progress("Unexpected error: %s" % sys.exc_info()[0])
finally:
    progress('Closing the script...')
    # Start a timer in case stopping everything nicely doesn't work
    signal.setitimer(signal.ITIMER_REAL, 5)  # seconds...
    pipe.stop()