From ee754b2358a9fbc144122ee0f8992f1d2c22768c Mon Sep 17 00:00:00 2001
From: Karthik Dharmarajan
Date: Mon, 29 Apr 2024 02:13:00 -0700
Subject: [PATCH] Added real experiment instructions

---
 README.md                      |  3 +++
 real_exps/README.md            | 23 +++++++++++++++++++++++
 real_exps/robot_env_xembody.py | 15 +--------------
 3 files changed, 27 insertions(+), 14 deletions(-)
 create mode 100644 real_exps/README.md

diff --git a/README.md b/README.md
index 23c26ec..3fdc409 100644
--- a/README.md
+++ b/README.md
@@ -71,6 +71,9 @@
 python3 run_robosuite_benchmark.py --config config/example_config.yaml
 ```
 Please take a look at the example_config and the different parameters that can be set to run different tasks, agents, and robots. For the above code to work, you must change the agents to the path for the model checkpoints in robosuite. We have provided the sample models used for evaluation in `mirage/mirage/models/{task}/{state_input}/{model_name}.pt`. The tasks are can, lift, square, stack, and two piece. The state inputs could be image_no_proprio (RGB observation only), image_proprio (RGB + proprio state), and low_dim (proprio + manipulated object position state).
+### Real Robot Execution
+For more details about real-world robot execution with Mirage, please see the README in the `real_exps` folder.
+
 ## Citation
 If you utilized the benchmark, please consider citing the paper:
 ```

diff --git a/real_exps/README.md b/real_exps/README.md
new file mode 100644
index 0000000..2fb2703
--- /dev/null
+++ b/real_exps/README.md
@@ -0,0 +1,23 @@
+# Mirage Real Experiments
+To evaluate a trajectory with inpainting on a real robot, first start the ROS / Gazebo inpainting environment.
+In terminal 1:
+```
+source mirage/mirage/ros_ws/install/setup.bash
+ros2 launch gazebo_env {real_launch_file}.launch.py
+```
+`real_launch_file` can be any of the launch files other than the robosuite one. The different launch files control which parts of the robot are inpainted.
+
+In terminal 2:
+```
+source mirage/mirage/ros_ws/install/setup.bash
+ros2 run gazebo_env {real_launch_file}.py
+```
+The writer node to run should have the same suffix as the launch file chosen in terminal 1.
+
+Then, fill in the appropriate path to the real-world model checkpoint in `model_config_mapping` in `evaluate_robomimic.py`.
+In terminal 3, run:
+```
+python3 evaluate_robomimic.py
+```
+
+Note: `robot_env_xembody.py` encapsulates the real robot in a standard gym-style environment.
\ No newline at end of file
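For reference, a minimal sketch of what the `model_config_mapping` mentioned above could look like; the key names and checkpoint paths are illustrative assumptions, not the actual contents of `evaluate_robomimic.py`:

```
# Hypothetical sketch of model_config_mapping in evaluate_robomimic.py.
# The key names and checkpoint paths below are illustrative assumptions.
model_config_mapping = {
    "lift_image_proprio": "/path/to/real_checkpoints/lift/image_proprio/model.pt",
    "can_image_proprio": "/path/to/real_checkpoints/can/image_proprio/model.pt",
}
```

Whatever the exact structure, the entry used at runtime should point at the checkpoint trained for the task being evaluated.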
diff --git a/real_exps/robot_env_xembody.py b/real_exps/robot_env_xembody.py
index 4e3f5c9..6ba89ea 100644
--- a/real_exps/robot_env_xembody.py
+++ b/real_exps/robot_env_xembody.py
@@ -158,14 +158,11 @@ def __init__(self, blocking_gripper=False, cam_ids=[22008760, 32474776]):
             self._robot = UR5Robot(gripper=1)
         else:
             self._robot = UR5Robot(gripper=3)
-        # self._robot.gripper.activate()
         self._robot.set_tcp(RigidTransform(translation=[0,0.0,-0.03], rotation=RigidTransform.z_axis_rotation(np.pi/2))) # Robotiq gripper
-        # self._robot.set_tcp(RigidTransform(translation=[0,0.0,0.0], rotation=RigidTransform.z_axis_rotation(3*np.pi/4))) # Franka gripper
 
         self.source_franka_target_robotiq = True
-        # self._robot.gripper.set_speed(100) # from 0 to 100 %
 
         self._gripper_is_closed = None
         self._gripper_being_blocked = False
 
 
@@ -180,7 +177,6 @@ def __init__(self, blocking_gripper=False, cam_ids=[22008760, 32474776]):
         self.zedcam2 = None
 
         # Initialize the space mouse
-        # self._controller = SpaceMouseRobotController()
 
         time.sleep(0.1)
 
@@ -372,24 +368,15 @@ def evaluate_robomimic_model_trajectory(self, model, traj_index=0, saving_direct
             inpainted_front_image = inpainted_front_image[::-1, ::-1, :]
             inpainted_front_image_mask = inpainted_front_image_mask[::-1, ::-1, :]
 
-            # inpainted_side_image_pil = Image.fromarray(cv2.resize(inpainted_side_image, (imsize, imsize)).astype(np.uint8))
             inpainted_side_image_pil = Image.fromarray(inpainted_side_image.astype(np.uint8))
             inpainted_side_image_pil.save(os.path.join(saving_directory, "inpaintng_output.png"))
 
-            # inpainted_front_image_pil = Image.fromarray(cv2.resize(inpainted_front_image, (imsize, imsize)).astype(np.uint8))
             inpainted_front_image_pil = Image.fromarray(inpainted_front_image.astype(np.uint8))
             inpainted_front_image_pil.save(os.path.join(saving_directory, "inpaintng_output_front.png"))
 
-            # """
-            # inpainted_side_image = side_image
-            # inpainted_front_image = front_image
 
             writer = AsyncWrite(inpainted_side_image, obs_dict["third_person_image"][0], obs_dict["third_person_image"][1], traj_index, saving_directory, i, front=False)
-            writer.start()
-
-            # writer2 = AsyncWrite(inpainted_front_image, obs_dict["third_person_image2"][0], obs_dict["third_person_image2"][1][::-1, ::-1], traj_index, saving_directory, i, front=True)
-            # writer2.start()
-
+            writer.start()
 
             inpainted_side_image_256 = cv2.resize(inpainted_side_image, (256, 256))
 
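Since the README above notes that `robot_env_xembody.py` wraps the real robot in a gym-style environment, here is a minimal usage sketch. The constructor arguments mirror the `__init__` signature visible in the hunks above; the class name `RobotEnv`, the `reset`/`step` semantics, and the action layout are assumptions for illustration:

```
# Minimal sketch of driving a gym-style real-robot environment.
# The class name, reset/step API, and 7-D action layout are assumptions;
# blocking_gripper and cam_ids follow the __init__ signature shown above.
import numpy as np
from robot_env_xembody import RobotEnv  # hypothetical class name

env = RobotEnv(blocking_gripper=False, cam_ids=[22008760, 32474776])
obs = env.reset()
for _ in range(100):
    action = np.zeros(7)  # e.g., 6-DoF end-effector delta plus gripper command
    obs, reward, done, info = env.step(action)
    if done:
        break
```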