diff --git a/depth.npy b/depth.npy
index 0680040..f4b91bd 100644
Binary files a/depth.npy and b/depth.npy differ
diff --git a/grasper1.py b/grasper1.py
index 38d110d..b7aea4a 100644
--- a/grasper1.py
+++ b/grasper1.py
@@ -61,7 +61,7 @@ def publisher_process(robot):
     else:
         gripper_pos = 0

-    if args.mode == "capture":
+    if args.mode == "capture" or args.mode == "pick":
         global_parameters.INIT_WRIST_PITCH = -1.57

     # Joint state publisher
@@ -79,18 +79,18 @@ def publisher_process(robot):
                                  wrist_roll = INIT_WRIST_ROLL,
                                  wrist_yaw = INIT_WRIST_YAW,
                                  gripper_pos = gripper_pos)
-    time.sleep(4)
+    time.sleep(1)

     hello_robot.move_to_position(lift_pos=INIT_LIFT_POS,
                                  wrist_pitch = global_parameters.INIT_WRIST_PITCH,
                                  wrist_roll = INIT_WRIST_ROLL,
                                  wrist_yaw = INIT_WRIST_YAW)
-    time.sleep(2)
+    time.sleep(1)


     # Intialsiing Camera
-    if args.mode == "move" or args.mode == "capture":
-        camera = RealSenseCamera()
+    #if args.mode == "move" or args.mode == "capture":
+    camera = RealSenseCamera()

     if args.mode == "capture":
         image_publisher = ImagePublisher(camera)
@@ -115,12 +115,14 @@ def publisher_process(robot):
     rotation = PyKDL.Rotation(1, 0, 0, 0, 1, 0, 0, 0, 1)

     if args.mode == "pick":
-        point = PyKDL.Vector(0.14819528, 0.12937662, 0.72112405)
+        image_publisher = ImagePublisher(camera)
+        translation, rotation = image_publisher.publish_image()
+        point = PyKDL.Vector(-translation[1], -translation[0], translation[2])

         # Rotation from model frame to pose frame
-        rotation1 = PyKDL.Rotation(-0.15578863, 0.61516678, -0.77285171,
-                                    -0.33291125, 0.70393169, 0.62741554,
-                                    0.93000001, 0.35503522, 0.09513136)
+        rotation1 = PyKDL.Rotation(rotation[0][0], rotation[0][1], rotation[0][2],
+                                    rotation[1][0], rotation[1][1], rotation[1][2],
+                                    rotation[2][0], rotation[2][1], rotation[2][2])

         # Rotation from camera frame to model frame
         rotation1_bottom = PyKDL.Rotation(0.0000000, -1.0000000, 0.0000000,
@@ -194,4 +196,8 @@ def publisher_process(robot):

     # Picking the object
     if (args.mode == "pick"):
-        hello_robot.pickup(abs(0))
\ No newline at end of file
+        hello_robot.pickup(abs(0))
+        # Put it down for now
+        time.sleep(3)
+        hello_robot.move_to_position(gripper_pos = 1)
+        hello_robot.move_to_position(arm_pos = INIT_ARM_POS)
diff --git a/nodes/image_publisher.py b/nodes/image_publisher.py
index 1b32292..04e0d69 100644
--- a/nodes/image_publisher.py
+++ b/nodes/image_publisher.py
@@ -1,4 +1,5 @@
-import rospy
+#import rospy
+import zmq

 import numpy as np
 from PIL import Image as PILImage
@@ -10,25 +11,43 @@
 IMAGE_PUBLISHER_NAME = '/gopro_image'
 DEPTH_PUBLISHER_NAME = '/gopro_depth'

-def convert_numpy_array_to_float32_multi_array(matrix):
+# use zmq to send a numpy array
+def send_array(socket, A, flags=0, copy=True, track=False):
+    """send a numpy array with metadata"""
+    md = dict(
+        dtype = str(A.dtype),
+        shape = A.shape,
+    )
+    socket.send_json(md, flags|zmq.SNDMORE)
+    return socket.send(np.ascontiguousarray(A), flags, copy=copy, track=track)
+
+# use zmq to receive a numpy array
+def recv_array(socket, flags=0, copy=True, track=False):
+    """recv a numpy array"""
+    md = socket.recv_json(flags=flags)
+    msg = socket.recv(flags=flags, copy=copy, track=track)
+    A = np.frombuffer(msg, dtype=md['dtype'])
+    return A.reshape(md['shape'])
+
+#def convert_numpy_array_to_float32_multi_array(matrix):
     # Create a Float64MultiArray object
-    data_to_send = Float32MultiArray()
+#    data_to_send = Float32MultiArray()

     # Set the layout parameters
-    data_to_send.layout.dim.append(MultiArrayDimension())
-    data_to_send.layout.dim[0].label = "rows"
-    data_to_send.layout.dim[0].size = len(matrix)
-    data_to_send.layout.dim[0].stride = len(matrix) * len(matrix[0])
+#    data_to_send.layout.dim.append(MultiArrayDimension())
+#    data_to_send.layout.dim[0].label = "rows"
+#    data_to_send.layout.dim[0].size = len(matrix)
+#    data_to_send.layout.dim[0].stride = len(matrix) * len(matrix[0])

-    data_to_send.layout.dim.append(MultiArrayDimension())
-    data_to_send.layout.dim[1].label = "columns"
-    data_to_send.layout.dim[1].size = len(matrix[0])
-    data_to_send.layout.dim[1].stride = len(matrix[0])
+#    data_to_send.layout.dim.append(MultiArrayDimension())
+#    data_to_send.layout.dim[1].label = "columns"
+#    data_to_send.layout.dim[1].size = len(matrix[0])
+#    data_to_send.layout.dim[1].stride = len(matrix[0])

     # Flatten the matrix into a list
-    data_to_send.data = matrix.flatten().tolist()
+#    data_to_send.data = matrix.flatten().tolist()

-    return data_to_send
+#    return data_to_send

 class ImagePublisher():

@@ -37,8 +56,13 @@ def __init__(self, camera):
         self.camera = camera
         self.bridge = CvBridge()

-        self.image_publisher = rospy.Publisher(IMAGE_PUBLISHER_NAME, Image, queue_size = 1)
-        self.depth_publisher = rospy.Publisher(DEPTH_PUBLISHER_NAME, Float32MultiArray, queue_size = 1)
+        context = zmq.Context()
+        self.socket = context.socket(zmq.REQ)
+        self.socket.connect("tcp://172.24.71.253:5555")
+        #self.image_publisher = rospy.Publisher(IMAGE_PUBLISHER_NAME, Image, queue_size = 1)
+        #self.depth_publisher = rospy.Publisher(DEPTH_PUBLISHER_NAME, Float32MultiArray, queue_size = 1)
+        #self.image = None
+        #self.depth = None

     def publish_image(self):
@@ -46,17 +70,19 @@ def publish_image(self):

         rotated_image = np.rot90(image, k=-1)
         rotated_depth = np.rot90(depth, k=-1)
-
-        PILImage.fromarray(rotated_image).save("./images/peiqi_test_rgb1.png")
-        PILImage.fromarray(rotated_depth).save("./images/peiqi_test_depth1.png")
-        # np.save("./images/test_rgb.npy", rotated_image)
-        # np.save("./images/test_depth.npy", rotated_depth)
-
-        try:
-            image_message = self.bridge.cv2_to_imgmsg(rotated_image, "bgr8")
-        except CvBridgeError as e:
-            print(e)
-
-        depth_data = convert_numpy_array_to_float32_multi_array(rotated_depth)
-        # print(image_message.shape)
-        # print(depth_data.shape)
+        PILImage.fromarray(rotated_image).save("./images/peiqi_test_rgb17.png")
+        PILImage.fromarray(rotated_depth).save("./images/peiqi_test_depth17.png")
+        send_array(self.socket, rotated_image)
+        print(self.socket.recv_string())
+        send_array(self.socket, rotated_depth)
+        print(self.socket.recv_string())
+        self.socket.send_string("Waiting for tranlation and rotation")
+        translation = recv_array(self.socket)
+        self.socket.send_string("translation received")
+        rotation = recv_array(self.socket)
+        self.socket.send_string("rotation received")
+        print("translation: ")
+        print(translation)
+        print("rotation: ")
+        print(rotation)
+        return translation, rotation
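
Note on the new transport: with this patch, ImagePublisher becomes the REQ half of a ZeroMQ request/reply exchange instead of a ROS publisher. It ships the rotated RGB and depth frames to the peer at tcp://172.24.71.253:5555 and expects a grasp translation and rotation back. The REP peer is not part of this diff; the sketch below is a minimal, hypothetical server loop that merely mirrors the message order used in publish_image (array, ack, array, ack, status string, translation, ack, rotation, ack, final ack). The helper names and predict_grasp are placeholders, not code from this repository.

# Hypothetical REP-side peer assumed by the patched ImagePublisher (not in this diff).
# It binds port 5555, acknowledges the RGB and depth arrays with strings, and answers
# the final request with a translation vector and a 3x3 rotation matrix.
import zmq
import numpy as np

def send_array(socket, A, flags=0, copy=True, track=False):
    """Send a numpy array with dtype/shape metadata (mirrors the client helper)."""
    md = dict(dtype=str(A.dtype), shape=A.shape)
    socket.send_json(md, flags | zmq.SNDMORE)
    return socket.send(np.ascontiguousarray(A), flags, copy=copy, track=track)

def recv_array(socket, flags=0, copy=True, track=False):
    """Receive a numpy array sent by send_array."""
    md = socket.recv_json(flags=flags)
    msg = socket.recv(flags=flags, copy=copy, track=track)
    return np.frombuffer(msg, dtype=md['dtype']).reshape(md['shape'])

def predict_grasp(rgb, depth):
    # Placeholder: a real server would run the grasp-prediction model here.
    return np.zeros(3, dtype=np.float64), np.eye(3, dtype=np.float64)

def main():
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:5555")           # the client connects to <this host>:5555

    while True:
        rgb = recv_array(socket)          # request 1: rotated RGB image
        socket.send_string("RGB received")
        depth = recv_array(socket)        # request 2: rotated depth image
        socket.send_string("depth received")

        socket.recv_string()              # request 3: "Waiting for ... rotation"
        translation, rotation = predict_grasp(rgb, depth)
        send_array(socket, translation)   # reply: 3-vector translation
        socket.recv_string()              # "translation received"
        send_array(socket, rotation)      # reply: 3x3 rotation matrix
        socket.recv_string()              # "rotation received"
        socket.send_string("done")        # complete the final REQ/REP pair

if __name__ == "__main__":
    main()

One consequence of the client-side ordering worth flagging: publish_image ends with send_string("rotation received") and never reads the server's final reply, so the strict REQ state machine will refuse a second send on the same socket unless ZMQ_REQ_RELAXED is set or the socket is recreated between picks.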