Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[FIX] addresses nan rotation matrices computed when Y contains duplicate points #78

Merged
merged 1 commit into from
Feb 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
178 changes: 94 additions & 84 deletions trackdlo/src/initialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
import time
import cv2
import numpy as np
import time

from visualization_msgs.msg import MarkerArray
from scipy import interpolate
Expand All @@ -27,27 +26,28 @@ def camera_info_callback (info):
camera_info_sub.unregister()

def color_thresholding (hsv_image, cur_depth):
# --- rope blue ---
lower = (90, 90, 60)
upper = (130, 255, 255)
mask_dlo = cv2.inRange(hsv_image, lower, upper).astype('uint8')

# --- tape red ---
lower = (130, 60, 40)
upper = (255, 255, 255)
mask_red_1 = cv2.inRange(hsv_image, lower, upper).astype('uint8')
lower = (0, 60, 40)
upper = (10, 255, 255)
mask_red_2 = cv2.inRange(hsv_image, lower, upper).astype('uint8')
mask_marker = cv2.bitwise_or(mask_red_1.copy(), mask_red_2.copy()).astype('uint8')
global lower, upper

mask_dlo = cv2.inRange(hsv_image.copy(), lower, upper).astype('uint8')

# tape green
lower_green = (58, 130, 50)
upper_green = (90, 255, 89)
mask_green = cv2.inRange(hsv_image.copy(), lower_green, upper_green).astype('uint8')

# combine masks
mask = cv2.bitwise_or(mask_marker.copy(), mask_dlo.copy())
mask = cv2.bitwise_or(mask_green.copy(), mask_dlo.copy())

# filter mask base on depth values
mask[cur_depth < 0.59*1000] = 0
mask[cur_depth < 0.57*1000] = 0

return mask, mask_green

return mask
def remove_duplicate_rows(array):
    """Return array with duplicate rows removed, preserving first-occurrence order."""
    _, first_indices = np.unique(array, axis=0, return_index=True)
    deduped = array[np.sort(first_indices)]
    # keep the tracker's configured node count in sync with the deduplicated set
    rospy.set_param('/init_tracker/num_of_nodes', len(deduped))
    return deduped

def callback (rgb, depth):
global lower, upper
Expand All @@ -66,77 +66,87 @@ def callback (rgb, depth):
mask = cv2.inRange(hsv_image, lower, upper)
else:
# color thresholding
mask = color_thresholding(hsv_image, cur_depth)

start_time = time.time()
mask = cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)

# returns the pixel coord of points (in order). a list of lists
img_scale = 1
extracted_chains = extract_connected_skeleton(visualize_initialization_process, mask, img_scale=img_scale, seg_length=8, max_curvature=25)

all_pixel_coords = []
for chain in extracted_chains:
all_pixel_coords += chain
print('Finished extracting chains. Time taken:', time.time()-start_time)

all_pixel_coords = np.array(all_pixel_coords) * img_scale
all_pixel_coords = np.flip(all_pixel_coords, 1)

pc_z = cur_depth[tuple(map(tuple, all_pixel_coords.T))] / 1000.0
f = proj_matrix[0, 0]
cx = proj_matrix[0, 2]
cy = proj_matrix[1, 2]
pixel_x = all_pixel_coords[:, 1]
pixel_y = all_pixel_coords[:, 0]

pc_x = (pixel_x - cx) * pc_z / f
pc_y = (pixel_y - cy) * pc_z / f
extracted_chains_3d = np.vstack((pc_x, pc_y))
extracted_chains_3d = np.vstack((extracted_chains_3d, pc_z))
extracted_chains_3d = extracted_chains_3d.T

# do not include those without depth values
extracted_chains_3d = extracted_chains_3d[((extracted_chains_3d[:, 0] != 0) | (extracted_chains_3d[:, 1] != 0) | (extracted_chains_3d[:, 2] != 0))]

if multi_color_dlo:
depth_threshold = 0.59 # m
extracted_chains_3d = extracted_chains_3d[extracted_chains_3d[:, 2] > depth_threshold]

# tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.001)
tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.0005)
# 1st fit, less points
u_fine = np.linspace(0, 1, 300) # <-- num fit points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T

# 2nd fit, higher accuracy
num_true_pts = int(np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1))) * 1000)
u_fine = np.linspace(0, 1, num_true_pts) # <-- num true points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T
total_spline_len = np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1)))

init_nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results_pub.publish(results)

# add color
pc_rgba = struct.unpack('I', struct.pack('BBBB', 255, 40, 40, 255))[0]
pc_rgba_arr = np.full((len(init_nodes), 1), pc_rgba)
pc_colored = np.hstack((init_nodes, pc_rgba_arr)).astype(object)
pc_colored[:, 3] = pc_colored[:, 3].astype(int)

header.stamp = rospy.Time.now()
converted_points = pcl2.create_cloud(header, fields, pc_colored)
pc_pub.publish(converted_points)

rospy.signal_shutdown('Finished initial node set computation.')
mask, mask_tip = color_thresholding(hsv_image, cur_depth)
try:
start_time = time.time()
mask = cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)

# returns the pixel coord of points (in order). a list of lists
img_scale = 1
extracted_chains = extract_connected_skeleton(visualize_initialization_process, mask, img_scale=img_scale, seg_length=8, max_curvature=25)

all_pixel_coords = []
for chain in extracted_chains:
all_pixel_coords += chain
print('Finished extracting chains. Time taken:', time.time()-start_time)

all_pixel_coords = np.array(all_pixel_coords) * img_scale
all_pixel_coords = np.flip(all_pixel_coords, 1)

pc_z = cur_depth[tuple(map(tuple, all_pixel_coords.T))] / 1000.0
f = proj_matrix[0, 0]
cx = proj_matrix[0, 2]
cy = proj_matrix[1, 2]
pixel_x = all_pixel_coords[:, 1]
pixel_y = all_pixel_coords[:, 0]
# if the first mask value is not in the tip mask, reverse the pixel order
if multi_color_dlo:
pixel_value1 = mask_tip[pixel_y[-1],pixel_x[-1]]
if pixel_value1 == 255:
pixel_x, pixel_y = pixel_x[::-1], pixel_y[::-1]

pc_x = (pixel_x - cx) * pc_z / f
pc_y = (pixel_y - cy) * pc_z / f
extracted_chains_3d = np.vstack((pc_x, pc_y))
extracted_chains_3d = np.vstack((extracted_chains_3d, pc_z))
extracted_chains_3d = extracted_chains_3d.T

# do not include those without depth values
extracted_chains_3d = extracted_chains_3d[((extracted_chains_3d[:, 0] != 0) | (extracted_chains_3d[:, 1] != 0) | (extracted_chains_3d[:, 2] != 0))]

if multi_color_dlo:
depth_threshold = 0.57 # m
extracted_chains_3d = extracted_chains_3d[extracted_chains_3d[:, 2] > depth_threshold]

# tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.001)
tck, u = interpolate.splprep(extracted_chains_3d.T, s=0.0005)
# 1st fit, less points
u_fine = np.linspace(0, 1, 300) # <-- num fit points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T

# 2nd fit, higher accuracy
num_true_pts = int(np.sum(np.sqrt(np.sum(np.square(np.diff(spline_pts, axis=0)), axis=1))) * 1000)
u_fine = np.linspace(0, 1, num_true_pts) # <-- num true points
x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
spline_pts = np.vstack((x_fine, y_fine, z_fine)).T

nodes = spline_pts[np.linspace(0, num_true_pts-1, num_of_nodes).astype(int)]

init_nodes = remove_duplicate_rows(nodes)
# results = ndarray2MarkerArray(init_nodes, result_frame_id, [1, 150/255, 0, 0.75], [0, 1, 0, 0.75])
results = ndarray2MarkerArray(init_nodes, result_frame_id, [0, 149/255, 203/255, 0.75], [0, 149/255, 203/255, 0.75])
results_pub.publish(results)

# add color
pc_rgba = struct.unpack('I', struct.pack('BBBB', 255, 40, 40, 255))[0]
pc_rgba_arr = np.full((len(init_nodes), 1), pc_rgba)
pc_colored = np.hstack((init_nodes, pc_rgba_arr)).astype(object)
pc_colored[:, 3] = pc_colored[:, 3].astype(int)

header.stamp = rospy.Time.now()
converted_points = pcl2.create_cloud(header, fields, pc_colored)
pc_pub.publish(converted_points)
except:
rospy.logerr("Failed to extract splines.")
rospy.signal_shutdown('Stopping initialization.')

if __name__=='__main__':
rospy.init_node('init_tracker', anonymous=True)

global new_messages, lower, upper
new_messages=False

num_of_nodes = rospy.get_param('/init_tracker/num_of_nodes')
multi_color_dlo = rospy.get_param('/init_tracker/multi_color_dlo')
camera_info_topic = rospy.get_param('/init_tracker/camera_info_topic')
Expand Down
8 changes: 4 additions & 4 deletions trackdlo/src/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ def pt2pt_dis_sq(pt1, pt2):
return np.sum(np.square(pt1 - pt2))

def pt2pt_dis(pt1, pt2):
return np.sqrt(np.sum(np.square(pt1 - pt2)))
return np.linalg.norm(pt1-pt2)

# from geeksforgeeks: https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
class Point_2D:
Expand Down Expand Up @@ -40,7 +40,7 @@ def orientation(p, q, r):
# See https://www.geeksforgeeks.org/orientation-3-ordered-points/amp/
# for details of below formula.

val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))
val = (np.float64(q.y - p.y) * (r.x - q.x)) - (np.float64(q.x - p.x) * (r.y - q.y))
if (val > 0):

# Clockwise orientation
Expand Down Expand Up @@ -175,7 +175,7 @@ def extract_connected_skeleton (visualize_process, mask, img_scale=10, seg_lengt
cv2.destroyAllWindows()
break

# perform skeletonization
# skeletonization
result = skeletonize(mask, method='zha')
gray = cv2.cvtColor(result.copy(), cv2.COLOR_BGR2GRAY)
gray[gray > 100] = 255
Expand Down Expand Up @@ -213,7 +213,7 @@ def extract_connected_skeleton (visualize_process, mask, img_scale=10, seg_lengt
break

# graph for visualization
mask = cv2.line(mask, contour[i][0], contour[i+1][0], 255, 1)
mask = cv2.line(mask, tuple(contour[i][0]), tuple(contour[i+1][0]), 255, 1)

# if haven't initialized, perform initialization
if cur_seg_start_point is None:
Expand Down
Loading