From 79265fc6e0654c9e89bd17d7a66b837b562de947 Mon Sep 17 00:00:00 2001 From: Karthik Dharmarajan Date: Tue, 27 Feb 2024 02:02:33 -0800 Subject: [PATCH] Implement Robosuite Benchmarking Standardization (#2) * Cleaned up some of the code * Added benchmarking code to be more clear * Added printing of parameters for a user * Updated with inpaint code * Added support for files that contain the configurations * Updated benchmarking code to work * Removed critical hardcoded paths --- xembody/setup.py | 4 +- xembody/xembody/src/benchmark/__init__.py | 0 .../src/benchmark/experiment_config.py | 11 ++ .../src/general/ros_inpaint_publisher.py | 17 ++- .../src/general/ros_inpaint_publisher_sim.py | 18 ++- .../src/ros_ws/src/gazebo_env/CMakeLists.txt | 12 ++ ...nda_gazebo_classic_robosuite_can.launch.py | 32 ++-- ...ift_square_stack_three_threading.launch.py | 33 ++-- ...c_robosuite_three_piece_assembly.launch.py | 31 ++-- .../description/urdf/d435.urdf.xacro | 4 +- ...hand_camera_scene_robosuite_can.urdf.xacro | 2 +- .../panda_no_gripper_control_plugin.cc | 105 +++++++++++++ .../write_data_node_robosuite_better.py | 24 +-- .../src/gazebo_env/worlds/no_shadow_sim.world | 53 +++++++ .../benchmark/config/example_config.yaml | 69 +++++++++ .../benchmark/robosuite_experiment.py | 108 ++++++++++++- .../benchmark/robosuite_experiment_config.py | 45 +++++- .../benchmark/run_robosuite_benchmark.py | 37 +++++ ...valuate_policy_demo_source_robot_server.py | 142 +++++------------- ...valuate_policy_demo_target_robot_client.py | 134 ++++------------- 20 files changed, 603 insertions(+), 278 deletions(-) create mode 100644 xembody/xembody/src/benchmark/__init__.py create mode 100644 xembody/xembody/src/benchmark/experiment_config.py create mode 100644 xembody/xembody/src/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc create mode 100644 xembody/xembody/src/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world create mode 100644 xembody/xembody_robosuite/benchmark/config/example_config.yaml diff --git a/xembody/setup.py b/xembody/setup.py index f6edb62..6810006 100644 --- a/xembody/setup.py +++ b/xembody/setup.py @@ -11,8 +11,10 @@ description='Repo for Mirage', author='Lawrence Chen, Karthik Dharmarajan, Kush Hari', package_dir = {'': '.'}, - packages=find_packages(include=['xembody', 'xembody.*']), + packages=find_packages(include=['xembody', 'xembody.*', 'xembody_robosuite', 'xembody_robosuite.*']), install_requires=[ + "numpy", + "pyyaml", ], extras_require={ "docs": [ diff --git a/xembody/xembody/src/benchmark/__init__.py b/xembody/xembody/src/benchmark/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/xembody/xembody/src/benchmark/experiment_config.py b/xembody/xembody/src/benchmark/experiment_config.py new file mode 100644 index 0000000..acce6a9 --- /dev/null +++ b/xembody/xembody/src/benchmark/experiment_config.py @@ -0,0 +1,11 @@ +from abc import ABC, abstractmethod + +class ExperimentConfig(ABC): + + @abstractmethod + def validate_config(self): + """ + Validate the configuration to see if the values are feasible. + :throws ValueError: If the configuration is not valid. 
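+        Concrete implementations should raise rather than return an error
+        value, so callers can fail fast before any processes are launched.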
+ """ + pass \ No newline at end of file diff --git a/xembody/xembody/src/general/ros_inpaint_publisher.py b/xembody/xembody/src/general/ros_inpaint_publisher.py index 67315c4..548ca8d 100644 --- a/xembody/xembody/src/general/ros_inpaint_publisher.py +++ b/xembody/xembody/src/general/ros_inpaint_publisher.py @@ -2,7 +2,7 @@ from sensor_msgs.msg import Image, PointCloud2 from cv_bridge import CvBridge from xembody.src.general.xembody_publisher import XEmbodyPublisher -from sensor_msgs.msg import PointCloud2, PointField +from sensor_msgs.msg import PointCloud2, PointField, Image from input_filenames_msg.msg import MultipleInpaintImages import cv2 import numpy as np @@ -15,7 +15,7 @@ class ROSInpaintPublisher(XEmbodyPublisher): Publishing data is left to sim or real subclasses. """ - def __init__(self, use_diffusion: bool = False): + def __init__(self, use_diffusion: bool = False, uses_single_img: bool = False): """ Initializes the ROS2 node. """ @@ -39,9 +39,10 @@ def __init__(self, use_diffusion: bool = False): # 0.1 # ) # self._time_sync.registerCallback(self._inpaint_image_callback) - + self._uses_single_img = uses_single_img + self._inpaint_sub_type = MultipleInpaintImages if not uses_single_img else Image self._inpaint_sub = self.node.create_subscription( - MultipleInpaintImages, 'inpainted_image', self._inpaint_single, 1) + self._inpaint_sub_type, 'inpainted_image', self._inpaint_single, 1) self._cv_bridge = CvBridge() self._cv_images = None @@ -111,8 +112,12 @@ def _inpaint_single(self, inpaint_msg): print("Received inpainted images") with self._internal_lock: images = [] - for image in inpaint_msg.images: - images.append(self._cv_bridge.imgmsg_to_cv2(image)) + if not self._uses_single_img: + for image in inpaint_msg.images: + images.append(self._cv_bridge.imgmsg_to_cv2(image)) + else: + images = self._cv_bridge.imgmsg_to_cv2(inpaint_msg) + self._cv_images = images if self._use_diffusion: diff --git a/xembody/xembody/src/general/ros_inpaint_publisher_sim.py b/xembody/xembody/src/general/ros_inpaint_publisher_sim.py index cff2f26..d115eae 100644 --- a/xembody/xembody/src/general/ros_inpaint_publisher_sim.py +++ b/xembody/xembody/src/general/ros_inpaint_publisher_sim.py @@ -30,17 +30,22 @@ class ROSInpaintPublisherSim(ROSInpaintPublisher): to a node that performs inpainting on a target robot. """ - def __init__(self): + def __init__(self, source_robot_info: str, target_robot_info: str): """ Initializes the ROS2 node. 
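+        Passes uses_single_img=True to the base class, so inpainted results
+        arrive as a single Image message rather than a MultipleInpaintImages batch.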
+ :param source_robot_info: the information about the source robot to determine which interpolation scheme to use + :param target_robot_info: the information about the target robot to determine which interpolation scheme to use """ - super().__init__() + super().__init__(uses_single_img=True) self._publisher = self.node.create_publisher( - InputFilesSimData, 'input_files_data_sim', 1) + InputFilesSimData, '/input_files_data_sim', 1) # TODO: generalize this - self.gripper_interpolator = GripperInterpolator('panda', 'ur5', '/home/kdharmarajan/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl') + self.gripper_interpolator = GripperInterpolator(source_robot_info, target_robot_info, ['/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl', + '/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_20_rollouts.pkl']) + # self.gripper_interpolator = GripperInterpolator('panda', 'panda', ['/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl']) + # self.gripper_interpolator = GripperInterpolator('panda', 'ur5', '/home/mirage/x-embody/xembody/xembody_robosuite/paired_trajectories_collection/gripper_interpolation_results_no_task_diff.pkl') def publish_to_ros_node(self, data: ROSInpaintSimData): """ @@ -56,5 +61,6 @@ def publish_to_ros_node(self, data: ROSInpaintSimData): msg.segmentation = segmentation_mask.flatten().tolist() msg.ee_pose = data.ee_pose.flatten().tolist() msg.interpolated_gripper = self.gripper_interpolator.interpolate_gripper(data.gripper_angles).flatten().tolist() - msg.camera_name = data.camera_name - self._publisher.publish(msg) \ No newline at end of file + # msg.camera_name = data.camera_name + self._publisher.publish(msg) + print("Published message") \ No newline at end of file diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/CMakeLists.txt b/xembody/xembody/src/ros_ws/src/gazebo_env/CMakeLists.txt index 0944e26..0ed1ad5 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/CMakeLists.txt +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/CMakeLists.txt @@ -68,6 +68,18 @@ ament_target_dependencies(panda_control_plugin ) target_link_libraries(panda_control_plugin ${GAZEBO_LIBRARIES} ${OpenCV_LIBS}) +add_library(panda_no_gripper_control_plugin SHARED panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc) +ament_target_dependencies(panda_no_gripper_control_plugin + "gazebo_dev" + "gazebo_ros" + "rclcpp" + "std_msgs" + "sensor_msgs" + "message_filters" + "cv_bridge" +) +target_link_libraries(panda_no_gripper_control_plugin ${GAZEBO_LIBRARIES} ${OpenCV_LIBS}) + add_library(ur5_and_panda_no_gripper_control_plugin SHARED ur5_and_panda_no_gripper_control_plugin/ur5_and_panda_no_gripper_control_plugin.cc) ament_target_dependencies(ur5_and_panda_no_gripper_control_plugin "gazebo_dev" diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_can.launch.py b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_can.launch.py index f16ff66..61d02e0 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_can.launch.py +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_can.launch.py @@ -24,9 +24,27 @@ ) from launch_ros.actions import Node from 
launch_ros.substitutions import FindPackageShare - +import os def generate_launch_description(): + + # Set the path to the Gazebo ROS package + pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros') + + # Set the path to this package. + pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env') + + # Set the path to the world file + world_file_name = 'no_shadow_sim.world' + world_path = os.path.join(pkg_share, 'worlds', world_file_name) + + world = LaunchConfiguration('world') + + declare_world_cmd = DeclareLaunchArgument( + name='world', + default_value=world_path, + description='Full path to the world model file to load') + # Declare arguments declared_arguments = [] declared_arguments.append( @@ -37,18 +55,14 @@ def generate_launch_description(): ) ) + declared_arguments.append(declare_world_cmd) + # Initialize Arguments gui = LaunchConfiguration("gui") gazebo_server = IncludeLaunchDescription( - PythonLaunchDescriptionSource( - [ - PathJoinSubstitution( - [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"] - ) - ] - ) - ) + PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')), + launch_arguments={'world': world}.items()) gazebo_client = IncludeLaunchDescription( PythonLaunchDescriptionSource( [ diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py index ab268c2..c38a7a6 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_lift_square_stack_three_threading.launch.py @@ -24,9 +24,26 @@ ) from launch_ros.actions import Node from launch_ros.substitutions import FindPackageShare - +import os def generate_launch_description(): + # Set the path to the Gazebo ROS package + pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros') + + # Set the path to this package. 
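+    # FindPackageShare resolves the installed share directory of gazebo_env,
+    # so the world file is located regardless of where the workspace lives.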
+ pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env') + + # Set the path to the world file + world_file_name = 'no_shadow_sim.world' + world_path = os.path.join(pkg_share, 'worlds', world_file_name) + + world = LaunchConfiguration('world') + + declare_world_cmd = DeclareLaunchArgument( + name='world', + default_value=world_path, + description='Full path to the world model file to load') + # Declare arguments declared_arguments = [] declared_arguments.append( @@ -37,18 +54,14 @@ def generate_launch_description(): ) ) + declared_arguments.append(declare_world_cmd) + # Initialize Arguments gui = LaunchConfiguration("gui") gazebo_server = IncludeLaunchDescription( - PythonLaunchDescriptionSource( - [ - PathJoinSubstitution( - [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"] - ) - ] - ) - ) + PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')), + launch_arguments={'world': world}.items()) gazebo_client = IncludeLaunchDescription( PythonLaunchDescriptionSource( [ @@ -128,7 +141,7 @@ def generate_launch_description(): nodes = [ gazebo_server, - gazebo_client, + #gazebo_client, node_robot_state_publisher, spawn_entity, joint_state_publisher_node, diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_three_piece_assembly.launch.py b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_three_piece_assembly.launch.py index e2f4881..01d3dfe 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_three_piece_assembly.launch.py +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/bringup/launch/panda_gazebo_classic_robosuite_three_piece_assembly.launch.py @@ -24,9 +24,26 @@ ) from launch_ros.actions import Node from launch_ros.substitutions import FindPackageShare - +import os def generate_launch_description(): + # Set the path to the Gazebo ROS package + pkg_gazebo_ros = FindPackageShare(package='gazebo_ros').find('gazebo_ros') + + # Set the path to this package. 
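+    # Same pattern as the other robosuite launch files: the 'world' argument
+    # defaults to the shadow-free world shipped with this package.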
+ pkg_share = FindPackageShare(package='gazebo_env').find('gazebo_env') + + # Set the path to the world file + world_file_name = 'no_shadow_sim.world' + world_path = os.path.join(pkg_share, 'worlds', world_file_name) + + world = LaunchConfiguration('world') + + declare_world_cmd = DeclareLaunchArgument( + name='world', + default_value=world_path, + description='Full path to the world model file to load') + # Declare arguments declared_arguments = [] declared_arguments.append( @@ -37,18 +54,14 @@ def generate_launch_description(): ) ) + declared_arguments.append(declare_world_cmd) + # Initialize Arguments gui = LaunchConfiguration("gui") gazebo_server = IncludeLaunchDescription( - PythonLaunchDescriptionSource( - [ - PathJoinSubstitution( - [FindPackageShare("gazebo_ros"), "launch", "gzserver.launch.py"] - ) - ] - ) - ) + PythonLaunchDescriptionSource(os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')), + launch_arguments={'world': world}.items()) gazebo_client = IncludeLaunchDescription( PythonLaunchDescriptionSource( [ diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro b/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro index d248a23..534eb53 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/d435.urdf.xacro @@ -108,8 +108,8 @@ 0.7853981634 R8G8B8 - 256 - 256 + 512 + 512 diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro b/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro index a3a3261..68d844e 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/description/urdf/panda_arm_hand_camera_scene_robosuite_can.urdf.xacro @@ -14,7 +14,7 @@ - + diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc b/xembody/xembody/src/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc new file mode 100644 index 0000000..9df1c55 --- /dev/null +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/panda_no_gripper_control_plugin/panda_no_gripper_control_plugin.cc @@ -0,0 +1,105 @@ +#ifndef _PANDACONTROL_PLUGIN_HH_ +#define _PANDACONTROL_PLUGIN_HH_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace gazebo +{ + /// \brief A plugin to control a NoPhysics sensor. + class PandaControlPlugin : public ModelPlugin + { + /// \brief Constructor + public: PandaControlPlugin() {} + + /// \brief The load function is called by Gazebo when the plugin is + /// inserted into simulation + /// \param[in] _model A pointer to the model that this plugin is + /// attached to. + /// \param[in] _sdf A pointer to the plugin's SDF element. 
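+    /// On load, the plugin subscribes to joint_commands, mirrors the commanded
+    /// positions onto the seven panda joints, and republishes them on
+    /// /gazebo_joint_states for downstream consumers.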
+ public: virtual void Load(physics::ModelPtr _model, sdf::ElementPtr _sdf) + { + // Initialize ROS node + this->node_ = gazebo_ros::Node::Get(_sdf); + this->model_name_ = _model->GetName(); + const gazebo_ros::QoS &qos = this->node_->get_qos(); + // Just output a message for now + std::cerr << "KUSHTIMUS PRIME PANDA NO GRIPPER" << "\n"; + this->robot_subscriber_ = this->node_->create_subscription( + "joint_commands", + qos.get_subscription_qos("joint_commands", rclcpp::QoS(1)), + std::bind(&PandaControlPlugin::jointCommandMsg, this, std::placeholders::_1)); + this->gazebo_joint_state_publisher_ = this->node_->create_publisher("/gazebo_joint_states",1); + + // ROS2 Message Filter Tutorial: https://answers.ros.org/question/366440/ros-2-message_filters-timesynchronizer-minimal-example-does-not-reach-callback-function/ + rclcpp::QoS qos_(1); + auto rmw_qos_profile = qos_.get_rmw_qos_profile(); + this->rgb_subscriber_.subscribe(this->node_, "/panda_camera/image_raw", rmw_qos_profile); + this->depth_subscriber_.subscribe(this->node_, "/panda_camera/depth/image_raw", rmw_qos_profile); + this->time_synchronizer_ = std::make_shared>(rgb_subscriber_, depth_subscriber_,1); + this->time_synchronizer_->registerCallback(std::bind(&PandaControlPlugin::imageCallback, this, std::placeholders::_1, std::placeholders::_2)); + + } + + //Needs to be const ConstSharedPtr: https://robotics.stackexchange.com/questions/102503/ros2-message-filters-synchronizer-compilation-error + void imageCallback(const sensor_msgs::msg::Image::ConstSharedPtr rgb_msg,const sensor_msgs::msg::Image::ConstSharedPtr depth_msg) { + this->rgb_msg_ = rgb_msg; + this->depth_msg_ = depth_msg; + } + void jointCommandMsg(const std_msgs::msg::Float64MultiArray::SharedPtr msg) { + std::cout << "Start publish" << std::endl; + physics::WorldPtr world = physics::get_world("default"); + auto model_ptr = world->ModelByName(model_name_); + auto panda_joint1 = model_ptr->GetJoint("panda_joint1"); + auto panda_joint2 = model_ptr->GetJoint("panda_joint2"); + auto panda_joint3 = model_ptr->GetJoint("panda_joint3"); + auto panda_joint4 = model_ptr->GetJoint("panda_joint4"); + auto panda_joint5 = model_ptr->GetJoint("panda_joint5"); + auto panda_joint6 = model_ptr->GetJoint("panda_joint6"); + auto panda_joint7 = model_ptr->GetJoint("panda_joint7"); + // Needs to coordinate with custon_joint_state_publisher_node.py + panda_joint1->SetPosition(0,msg->data[0]); + panda_joint2->SetPosition(0,msg->data[1]); + panda_joint3->SetPosition(0,msg->data[2]); + panda_joint4->SetPosition(0,msg->data[3]); + panda_joint5->SetPosition(0,msg->data[4]); + panda_joint6->SetPosition(0,msg->data[5]); + panda_joint7->SetPosition(0,msg->data[6]); + auto message = sensor_msgs::msg::JointState(); + message.header.stamp = this->node_->get_clock() ->now(); + message.name = {"panda_joint1","panda_joint2","panda_joint3","panda_joint4","panda_joint5","panda_joint6","panda_joint7"}; + message.position = {msg->data[0],msg->data[1],msg->data[2],msg->data[3],msg->data[4],msg->data[5],msg->data[6]}; + //Delay for image to catch up to joint position update + //usleep(100000); + this->gazebo_joint_state_publisher_->publish(message); + std::cout << "Finished publish" << std::endl; + } + + + private: + gazebo_ros::Node::SharedPtr node_; + std::string model_name_; + rclcpp::Subscription::SharedPtr robot_subscriber_; + rclcpp::Publisher::SharedPtr gazebo_joint_state_publisher_; + message_filters::Subscriber rgb_subscriber_; + message_filters::Subscriber depth_subscriber_; + std::shared_ptr> 
time_synchronizer_; + sensor_msgs::msg::Image::ConstSharedPtr rgb_msg_; + sensor_msgs::msg::Image::ConstSharedPtr depth_msg_; + + }; + + // Tell Gazebo about this plugin, so that Gazebo can call Load on this plugin. + GZ_REGISTER_MODEL_PLUGIN(PandaControlPlugin) +} +#endif diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py b/xembody/xembody/src/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py index 0b522b6..071e0f5 100644 --- a/xembody/xembody/src/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/scripts/write_data_node_robosuite_better.py @@ -2,13 +2,13 @@ import rclpy from rclpy.node import Node -import trimesh -import open3d as o3d from std_msgs.msg import Header, Float64MultiArray,Bool from sensor_msgs.msg import PointCloud2, PointField, CameraInfo, Image import numpy as np from launch_ros.substitutions import FindPackageShare from launch.substitutions import PathJoinSubstitution +import trimesh +import open3d as o3d import os import glob import subprocess @@ -39,9 +39,9 @@ def __init__(self): self.is_ready_ = False self.thetas_ = None self.debug_ = False - self.panda_urdf_ = "/home/lawrence/cross_embodiment_ws/src/gazebo_env/description/urdf/panda_ik_robosuite.urdf" + self.panda_urdf_ = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../../../../src/gazebo_env/description/urdf/panda_ik_robosuite.urdf') self.panda_solver_ = TracIKSolver(self.panda_urdf_,"world","panda_ee") - self.ur5e_urdf_ = "/home/lawrence/cross_embodiment_ws/src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf" + self.ur5e_urdf_ = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../../../../src/gazebo_env/description/urdf/ur5e_ik_robosuite.urdf') self.chain_ = kp.build_chain_from_urdf(open(self.panda_urdf_).read()) self.ur5e_solver_ = TracIKSolver(self.ur5e_urdf_,"world","ur5e_ee_link") @@ -81,8 +81,9 @@ def __init__(self): self.joint_state_msg = JointState() #Harcoding start position - self.q_init_ = np.array([-0.04536656 , 0.22302045, -0.01685448, -2.57859539, 0.02532237 , 2.93147512,0.83630218]) - + # self.q_init_ = np.array([-0.04536656, 0.22302045, -0.01685448, -2.57859539, 0.02532237, 2.93147512, 0.83630218]) + self.q_init_ = np.array([-0.0644708305, 0.528892205, -0.0626163565, 1.85382987, 0.0547194409, 0.614586870, -1.73115559]) + self.urdf_xacro_path_ = os.path.join(FindPackageShare(package="gazebo_env").find("gazebo_env"),"urdf","panda_arm_hand_only.urdf.xacro") xacro_command = "ros2 run xacro xacro " + self.urdf_xacro_path_ xacro_subprocess = subprocess.Popen( @@ -167,6 +168,7 @@ def __init__(self): self.full_mask_image_publisher_ = self.create_publisher(Image,"full_mask_image",1) self.updated_joints_ = False self.is_ready_ = True + print("WE READY") def gazeboCallback(self,joints,gazebo_rgb,gazebo_depth): joint_command_np = np.array(self.robosuite_qout_list_) @@ -410,10 +412,10 @@ def inpainting(self,rgb,depth,seg_file,gazebo_rgb,gazebo_seg,gazebo_depth): robosuite_rgb_image_masked_inpaint = cv2.inpaint(robosuite_rgb_image_masked,robosuite_segmentation_mask_255,inpaintRadius=3,flags=cv2.INPAINT_TELEA) attempt = robosuite_rgb_image_masked_inpaint * joined_depth_argmin[:,:,np.newaxis] inverted_joined_depth_argmin = 1 - joined_depth_argmin - gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB) - gazebo_robot_only_lab[:,:,0] += 50 - gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR) - gazebo_robot_only_rgb = 
gazebo_robot_only_mod + # gazebo_robot_only_lab = cv2.cvtColor(gazebo_robot_only_rgb,cv2.COLOR_BGR2LAB) + # gazebo_robot_only_lab[:,:,0] += 50 + # gazebo_robot_only_mod = cv2.cvtColor(gazebo_robot_only_lab,cv2.COLOR_LAB2BGR) + # gazebo_robot_only_rgb = gazebo_robot_only_mod attempt2 = gazebo_robot_only_rgb * inverted_joined_depth_argmin[:,:,np.newaxis] inpainted_image = attempt + attempt2 image_8bit = cv2.convertScaleAbs(inpainted_image) # Convert to 8-bit image @@ -849,6 +851,8 @@ def listenerCallbackOnlineDebug(self,msg): ee_pose = ee_pose @ end_effector_rotation_with_no_translation scipy_rotation = R.from_matrix(ee_pose[:3,:3]) scipy_quaternion = scipy_rotation.as_quat() + # import pdb + # pdb.set_trace() qout = self.panda_solver_.ik(ee_pose,qinit=self.q_init_) b_xyz = 1e-5 b_rpy = 1e-3 diff --git a/xembody/xembody/src/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world b/xembody/xembody/src/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world new file mode 100644 index 0000000..29ead2e --- /dev/null +++ b/xembody/xembody/src/ros_ws/src/gazebo_env/worlds/no_shadow_sim.world @@ -0,0 +1,53 @@ + + + + + + model://ground_plane + + + 0.5 0.5 0.5 1 + 0 + + + + + 0 0 13.89 0 0 0 + 0.5 0.5 0.5 1 + 0.1 0.1 0.1 1 + 0 0 -1 + + 20 + 0.5 + 0.01 + 0.001 + + 0 + + 0.6 + 1 + 1 + + + + 19.45 0.0 10.651246 0.0 1.57 0 + 0.5 0.5 0.5 1 + 0.1 0.1 0.1 1 + 0 0 -1 + + 20 + 0.5 + 0.01 + 0.001 + + 0 + + 0.6 + 1 + 1 + + + + diff --git a/xembody/xembody_robosuite/benchmark/config/example_config.yaml b/xembody/xembody_robosuite/benchmark/config/example_config.yaml new file mode 100644 index 0000000..9db5aa1 --- /dev/null +++ b/xembody/xembody_robosuite/benchmark/config/example_config.yaml @@ -0,0 +1,69 @@ +# The path to the model checkpoint for the source agent +# Note: The agent determines the environment +# Note: The type of observation of the policy matters, as inpainting will not run if the observation is lowdim +source_agent_path: can_low_dim/20240123025005/models/model_epoch_250_PickPlaceCan_success_1.0.pth + +# Similar to source_agent_path, but for the target agent (typically both are the same) +target_agent_path: can_low_dim/20240123025005/models/model_epoch_250_PickPlaceCan_success_1.0.pth + +# Number of rollouts done for evaluation +n_rollouts: 1 + +# Max horizon for each rollout +horizon: 350 +seed: 0 + +# Keep true for now +passive: True + +# Allows for using sockets to connect the source robot process and target robot process +# Ensure that this is true for now until the communication backend is being reworked. +connection: True + +# The types of the source and target robot respectively (Panda, Sawyer, UR5e, Kinova3, Jaco, IIWA) +source_robot_name: "Panda" +target_robot_name: "UR5e" + +# Tracking error thresholds are for when absolute control is used, i.e. 
+# The controller steps the robot end effector to within tracking_error of the target pose
+# There is also a limit on the maximum number of internal iterations, which is num_iter_max
+# Typically the tracking error and num_iter_max are the same for both source and target
+# Note: if num_iter_max is set to 1, this is non-blocking control; otherwise it is blocking
+source_tracking_error_threshold: 0.015
+source_num_iter_max: 300
+
+target_tracking_error_threshold: 0.015
+target_num_iter_max: 300
+
+# If set to true, the control type will be delta as opposed to absolute
+delta_action: false
+
+# Inpainting-related configuration
+# The conditions to enable it are:
+# 1) The policy is highdim
+# 2) enable_inpainting is set to true
+# 3) Set use_ros to true (currently, ROS mode is the only publicly supported mode)
+# 4) Set offline_eval to False (for now, until better documentation is put up to support that mode)
+# 5) Set the diffusion parameters accordingly
+enable_inpainting: false
+use_ros: false
+offline_eval: false
+
+use_diffusion: false
+
+# Diffusion input types are still being worked on for public release, so for now, set this to ""
+diffusion_input_type: ""
+
+# Path to a folder that will contain both the source.txt and target.txt results
+results_folder: "results"
+
+# Optionally, provide paths for storing videos of the rollouts of both robots
+# Uncomment below if necessary
+# source_video_path: "source_video.mp4"
+# target_video_path: "target_video.mp4"
+
+# Optionally, provide different gripper types to evaluate
+# If left commented, it will just default to the normal gripper each robot uses.
+# Gripper types can be PandaGripper, RobotiqGripper, or default
+# source_gripper_type: "default"
+# target_gripper_type: "default"
\ No newline at end of file
diff --git a/xembody/xembody_robosuite/benchmark/robosuite_experiment.py b/xembody/xembody_robosuite/benchmark/robosuite_experiment.py
index b528296..db5d5f1 100644
--- a/xembody/xembody_robosuite/benchmark/robosuite_experiment.py
+++ b/xembody/xembody_robosuite/benchmark/robosuite_experiment.py
@@ -1,4 +1,8 @@
-from xembody.xembody_robosuite.benchmark.robosuite_experiment_config import ExperimentRobotsuiteConfig
+import subprocess
+import random
+import os
+
+from xembody_robosuite.benchmark.robosuite_experiment_config import ExperimentRobotsuiteConfig
 
 class RobosuiteExperiment:
     """
@@ -9,7 +13,7 @@ def __init__(self, config: ExperimentRobotsuiteConfig) -> None:
         self._config = config
 
         # Check for config validity
-        self._config.validate()
+        self._config.validate_config()
 
         self._source_process = None
         self._target_process = None
@@ -20,21 +24,113 @@ def get_config(self) -> ExperimentRobotsuiteConfig:
         """
         return self._config
 
-    def launch(self) -> None:
+    def launch(self, override=False) -> None:
         """
         Launches the experiment
         """
-        pass
+        if not os.path.exists(self._config.results_folder):
+            os.makedirs(self._config.results_folder)
+        else:
+            if not override:
+                raise ValueError("Results folder already exists. Please delete the folder or set override to True")
+
+        source_agent_args = ["python3",
+                            "../evaluate_policy_demo_source_robot_server.py",
+                            "--agent", self._config.source_agent_path,
+                            "--n_rollouts", str(self._config.n_rollouts),
+                            "--seeds", str(self._config.seed),
+                            "--tracking_error_threshold", str(self._config.source_tracking_error_threshold),
+                            "--num_iter_max", str(self._config.source_num_iter_max),
+                            "--horizon", str(self._config.horizon),
+                            "--robot_name", self._config.source_robot_name,
+                            "--save_stats_path", os.path.join(self._config.results_folder, "source.txt")
+                            ]
+        target_agent_args = ["python3",
+                            "../evaluate_policy_demo_target_robot_client.py",
+                            "--agent", self._config.target_agent_path,
+                            "--n_rollouts", str(self._config.n_rollouts),
+                            "--seeds", str(self._config.seed),
+                            "--tracking_error_threshold", str(self._config.target_tracking_error_threshold),
+                            "--num_iter_max", str(self._config.target_num_iter_max),
+                            "--horizon", str(self._config.horizon),
+                            "--robot_name", self._config.target_robot_name,
+                            "--save_stats_path", os.path.join(self._config.results_folder, "target.txt")
+                            ]
+
+        if self._config.source_gripper_type:
+            source_agent_args.append("--gripper_type")
+            source_agent_args.append(self._config.source_gripper_type)
+
+        if self._config.target_gripper_type:
+            target_agent_args.append("--gripper_type")
+            target_agent_args.append(self._config.target_gripper_type)
+
+        if self._config.connection:
+            source_agent_args.append("--connection")
+            target_agent_args.append("--connection")
+
+        # TODO(kdharmarajan): This is not actually guaranteed to be a free port, so this needs to be fixed downstream.
+        random_port = random.randint(10000, 65535)
+        source_agent_args.append("--port")
+        source_agent_args.append(str(random_port))
+        target_agent_args.append("--port")
+        target_agent_args.append(str(random_port))
+
+        if self._config.delta_action:
+            target_agent_args.append("--delta_action")
+
+        if self._config.source_video_path:
+            source_agent_args.append("--video_path")
+            source_agent_args.append(self._config.source_video_path)
+
+        if self._config.target_video_path:
+            target_agent_args.append("--video_path")
+            target_agent_args.append(self._config.target_video_path)
+
+        if self._config.enable_inpainting:
+            source_agent_args.append("--inpaint_enabled")
+            target_agent_args.append("--inpaint_enabled")
+
+        if self._config.use_ros:
+            source_agent_args.append("--use_ros")
+            target_agent_args.append("--use_ros")
+
+        if self._config.offline_eval:
+            source_agent_args.append("--offline_eval")
+            target_agent_args.append("--offline_eval")
+
+        if self._config.use_diffusion:
+            source_agent_args.append("--use_diffusion")
+            target_agent_args.append("--use_diffusion")
+
+        source_agent_args.append("--diffusion_input_type")
+        source_agent_args.append(self._config.diffusion_input_type)
+        target_agent_args.append("--diffusion_input_type")
+        target_agent_args.append(self._config.diffusion_input_type)
+
+        if self._config.passive:
+            source_agent_args.append("--passive")
+
+        self._source_process = subprocess.Popen(source_agent_args)
+        self._target_process = subprocess.Popen(target_agent_args)
 
     def stop(self) -> None:
         """
         Stops the experiment
         """
-        pass
+        self._source_process.kill()
+        self._target_process.kill()
 
     def get_results(self, blocking = False) -> None:
         """
         Returns the results of the experiment
         :param blocking: If True, waits for the results to be available
         """
-        pass
\ No newline at end of file
+        if blocking:
+            self._source_process.wait()
+            self._target_process.wait()
+        with open(os.path.join(self._config.results_folder, "source.txt"), "r") as source_file:
+            source_stats = source_file.read()
+        with open(os.path.join(self._config.results_folder, "target.txt"), "r") as target_file:
+            target_stats = target_file.read()
+        return source_stats, target_stats
diff --git a/xembody/xembody_robosuite/benchmark/robosuite_experiment_config.py b/xembody/xembody_robosuite/benchmark/robosuite_experiment_config.py
index 4e2a90c..e72ceb0 100644
--- a/xembody/xembody_robosuite/benchmark/robosuite_experiment_config.py
+++ b/xembody/xembody_robosuite/benchmark/robosuite_experiment_config.py
@@ -1,7 +1,8 @@
 from dataclasses import dataclass
-from xembody.benchmark.experiment_config import ExperimentConfig
+from xembody.src.benchmark.experiment_config import ExperimentConfig
 from typing import Optional
 from prettytable import PrettyTable
+import yaml
 
 @dataclass
 class ExperimentRobotsuiteConfig(ExperimentConfig):
@@ -38,6 +39,9 @@ class ExperimentRobotsuiteConfig(ExperimentConfig):
     use_diffusion: bool
     diffusion_input_type: str
 
+    # Results folder
+    results_folder: str
+
     # Optional video paths for source and target
     source_video_path: Optional[str] = None
     target_video_path: Optional[str] = None
@@ -46,7 +50,7 @@ class ExperimentRobotsuiteConfig(ExperimentConfig):
     source_gripper_type: Optional[str] = None
     target_gripper_type: Optional[str] = None
 
-    def validate(self):
+    def validate_config(self):
         """
         Validates the configuration to see if the values are feasible.
         :throws ValueError: If the configuration is not valid.
@@ -100,4 +104,39 @@ def __str__(self):
         table.add_row(["Target Video Path", self.target_video_path])
         table.add_row(["Source Gripper Type", self.source_gripper_type])
         table.add_row(["Target Gripper Type", self.target_gripper_type])
-        return table.get_formatted_string()
\ No newline at end of file
+        return table.get_formatted_string()
+
+    @staticmethod
+    def from_yaml(yaml_file: str):
+        """
+        Creates an ExperimentRobotsuiteConfig from a YAML file.
+        """
+        # TODO(kdharmarajan): Add validation for the file and provide more meaningful error messages.
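+        # Typical use, mirroring run_robosuite_benchmark.py below:
+        #   config = ExperimentRobotsuiteConfig.from_yaml("config/example_config.yaml")
+        #   print(config)  # pretty-printed parameter table from __str__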
+ with open(yaml_file, 'r') as file: + config = yaml.safe_load(file) + return ExperimentRobotsuiteConfig( + source_agent_path=config["source_agent_path"], + target_agent_path=config["target_agent_path"], + n_rollouts=config["n_rollouts"], + horizon=config["horizon"], + seed=config["seed"], + passive=config["passive"], + connection=config["connection"], + source_robot_name=config["source_robot_name"], + target_robot_name=config["target_robot_name"], + source_tracking_error_threshold=config["source_tracking_error_threshold"], + target_tracking_error_threshold=config["target_tracking_error_threshold"], + source_num_iter_max=config["source_num_iter_max"], + target_num_iter_max=config["target_num_iter_max"], + delta_action=config["delta_action"], + enable_inpainting=config["enable_inpainting"], + use_ros=config["use_ros"], + offline_eval=config["offline_eval"], + use_diffusion=config["use_diffusion"], + diffusion_input_type=config["diffusion_input_type"], + results_folder=config["results_folder"], + source_video_path=config.get("source_video_path"), + target_video_path=config.get("target_video_path"), + source_gripper_type=config.get("source_gripper_type"), + target_gripper_type=config.get("target_gripper_type") + ) diff --git a/xembody/xembody_robosuite/benchmark/run_robosuite_benchmark.py b/xembody/xembody_robosuite/benchmark/run_robosuite_benchmark.py index e69de29..7876aac 100644 --- a/xembody/xembody_robosuite/benchmark/run_robosuite_benchmark.py +++ b/xembody/xembody_robosuite/benchmark/run_robosuite_benchmark.py @@ -0,0 +1,37 @@ +from xembody_robosuite.benchmark.robosuite_experiment_config import ExperimentRobotsuiteConfig +from xembody_robosuite.benchmark.robosuite_experiment import RobosuiteExperiment + +import argparse + +def main(): + parser = argparse.ArgumentParser(description="Mirage Robosuite Benchmark") + parser.add_argument("--config_file", type=str) + args = parser.parse_args() + + print("Loading config from: ", args.config_file) + config = ExperimentRobotsuiteConfig.from_yaml(args.config_file) + print(config) + should_launch = input("Launch the experiment? [Y/n] ") + if should_launch.lower() != "y": + print("Exiting...") + return + + new_experiment = RobosuiteExperiment(config) + + try: + new_experiment.launch() + except ValueError as e: + should_override = input("Results folder already exists. Override? 
[Y/n] ") + if should_override.lower() != "y": + print("Exiting...") + return + new_experiment.launch(override=True) + + source_results, target_results = new_experiment.get_results(blocking=True) + print("Source Results:") + print(source_results) + print("Target Results:") + print(target_results) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/xembody/xembody_robosuite/evaluate_policy_demo_source_robot_server.py b/xembody/xembody_robosuite/evaluate_policy_demo_source_robot_server.py index 6685be9..d24cc16 100644 --- a/xembody/xembody_robosuite/evaluate_policy_demo_source_robot_server.py +++ b/xembody/xembody_robosuite/evaluate_policy_demo_source_robot_server.py @@ -1,42 +1,3 @@ - - -""" -# Mode 1: Target robot following the source robot -python evaluate_policy_demo_source_robot_server.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --seeds 0 --connection --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_1.mp4 - -# Mode 2: Target robot querying the source robot for actions -python evaluate_policy_demo_source_robot_server.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --horizon 400 --seeds 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_1.mp4 --connection --passive - -# Mode 3: Demonstration playback -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 400 --seeds 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_demo_playback_1.mp4 --connection --demo_path /home/kdharmarajan/x-embody/robomimic/datasets/lift/mh/demo_v141.hdf5 - -python evaluate_policy_demo_source_robot_server.py --n_rollouts 100 --horizon 400 --seeds 0 --connection --demo_path /home/kdharmarajan/x-embody/robomimic/datasets/lift/mh/demo_v141.hdf5 --save_stats_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/demo_data_analysis/lift_lowdim_source_sawyer_0.015_300.txt --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_demo_playback_1.mp4 - -# Mode 4: Target robot querying source robot policy based on inpainted images -# 84x84 images -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --passive --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_source.mp4 --tracking_error_threshold 0.02 --num_iter_max 300 --inpaint_enabled - -# 256x256 images -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject_256/20231219024711/models/model_epoch_200_Lift_success_1.0.pth --passive --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_source.mp4 --tracking_error_threshold 0.02 --num_iter_max 300 --inpaint_enabled - -# Forward dynamics evaluation -# Linear Regression -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --passive --video_path 
/home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_source.mp4 --tracking_error_threshold 0.02 --num_iter_max 300 --inpaint_enabled --forward_dynamics_model_path /home/kdharmarajan/x-embody/robomimic/forward_dynamics_linear_reg/forward_dynamics_model.pkl - -# Mode 4 Sample Demo for 1 -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 100 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject_dataaug/20231209150046/models/model_epoch_200_Lift_success_1.0.pth --passive --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_source_diffusion.mp4 --tracking_error_threshold 0.02 --num_iter_max 300 --inpaint_enabled - - -# Mode 5: Generate paired dataset for diffusion model -python evaluate_policy_demo_source_robot_server.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --passive --save_paired_images --save_paired_images_folder_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/diffusion_model_data/success_trajs_withpose --tracking_error_threshold 0.02 - -# Mode 6: Collect demonstration for UR5 -python evaluate_policy_demo_source_robot_server.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 5 --horizon 400 --seeds 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_1.mp4 --connection --passive -""" - - - - from PIL import Image import argparse import json @@ -90,7 +51,7 @@ class Data: tracking_error_history = [] class Robot: - def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, demo_path=None, inpaint_enabled=False, save_paired_images=False, save_paired_images_folder_path=None, device=None, save_failed_demos=False, gripper_types=None): + def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, demo_path=None, inpaint_enabled=False, save_paired_images=False, save_paired_images_folder_path=None, device=None, save_failed_demos=False, gripper_types=None, save_stats_path=None): """_summary_ Args: @@ -115,6 +76,7 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non self.inpaint_writer = imageio.get_writer(os.path.join(os.path.dirname(self.video_path), "inpaint_video.mp4"), fps=20) if self.video_writer is not None else None self.save_failed_demos = save_failed_demos self.gripper_types = gripper_types + self.save_stats_path = os.path.dirname(save_stats_path) self.inpaint_enabled = inpaint_enabled self.save_paired_images = save_paired_images @@ -122,13 +84,13 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non if self.inpaint_enabled: base_path = os.path.basename(ckpt_path).split(".")[0] - self.groundtruth_and_inpaintedprediction_path = f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/groundtruth_source_img_{base_path}.npy" - self.inpainted_img_path = f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/inpainted_img_{base_path}.npy" - self.inpainted_rgb_img_path = f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/inpainted_rgb_img_{base_path}_{gripper_types}.jpg" - self.inpaint_data_for_analysis_path = 
f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/inpaint_data_for_analysis_{base_path}_{gripper_types}.npy" - self.inpaint_data_for_analysis_path_temp = f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/inpaint_data_for_analysis_temp_{base_path}_{gripper_types}.npy" - self.diffusion_model_input_path = f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/diffusion_model_input_{base_path}_{gripper_types}.npy" - self.inpaint_data_for_analysis_path = "/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/inpaint_ur5_offline_data.npy" + self.groundtruth_and_inpaintedprediction_path = f"{self.save_stats_path}/groundtruth_source_img_{base_path}.npy" + self.inpainted_img_path = f"{self.save_stats_path}/inpainted_img_{base_path}.npy" + self.inpainted_rgb_img_path = f"{self.save_stats_path}/inpainted_rgb_img_{base_path}_{gripper_types}.jpg" + self.inpaint_data_for_analysis_path = f"{self.save_stats_path}/inpaint_data_for_analysis_{base_path}_{gripper_types}.npy" + self.inpaint_data_for_analysis_path_temp = f"{self.save_stats_path}/inpaint_data_for_analysis_temp_{base_path}_{gripper_types}.npy" + self.diffusion_model_input_path = f"{self.save_stats_path}/diffusion_model_input_{base_path}_{gripper_types}.npy" + self.inpaint_data_for_analysis_path = f"{self.save_stats_path}/inpaint_ur5_offline_data.npy" if self.save_paired_images: assert save_paired_images_folder_path is not None, "Please specify save_paired_images_folder_path" @@ -149,7 +111,6 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non # create environment from saved checkpoint self.device = TorchUtils.get_torch_device(try_to_use_cuda=True) if device is None else device - # breakpoint() if self.use_demo: env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=self.hdf5_path) self.env = EnvUtils.create_env_for_data_processing( @@ -504,8 +465,8 @@ def run_experiments(self, seeds, rollout_num_episodes=1, video_skip=5, camera_na class SourceRobot(Robot): - def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, connection=None, port = 50007, passive=True, demo_path=None, inpaint_enabled=False, forward_dynamics_model_path='/home/kdharmarajan/x-embody/robomimic/forward_dynamics/forward_dynamics_bc_img_300.pth', save_paired_images=False, save_paired_images_folder_path=None, device=None, save_failed_demos=False, naive=False): - super().__init__(robot_name=robot_name, ckpt_path=ckpt_path, render=render, video_path=video_path, rollout_horizon=rollout_horizon, seed=seed, dataset_path=dataset_path, demo_path=demo_path, inpaint_enabled=inpaint_enabled, save_paired_images=save_paired_images, save_paired_images_folder_path=save_paired_images_folder_path, device=device, save_failed_demos=save_failed_demos) + def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, connection=None, port = 50007, passive=True, demo_path=None, inpaint_enabled=False, forward_dynamics_model_path='', save_paired_images=False, save_paired_images_folder_path=None, device=None, save_failed_demos=False, naive=False, save_stats_path=None): + super().__init__(robot_name=robot_name, ckpt_path=ckpt_path, render=render, video_path=video_path, rollout_horizon=rollout_horizon, seed=seed, dataset_path=dataset_path, demo_path=demo_path, inpaint_enabled=inpaint_enabled, save_paired_images=save_paired_images, 
save_paired_images_folder_path=save_paired_images_folder_path, device=device, save_failed_demos=save_failed_demos, save_stats_path=save_stats_path) if connection: HOST = 'localhost' @@ -530,7 +491,7 @@ def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=Non self.naive = naive if self.naive: - self.interpolator = GripperInterpolator('Panda', robot_name, ['/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/gripper_interpolation_results_no_task_diff.pkl']) + self.interpolator = GripperInterpolator('Panda', robot_name, [f'{self.save_stats_path}/gripper_interpolation_results_no_task_diff.pkl']) def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_object_state=False, set_robot_pose=False, tracking_error_threshold=0.003, num_iter_max=100, target_robot_delta_action=False, demo_index=0): """ @@ -585,46 +546,26 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o self.policy.start_episode() self.initialize_robot() - if False: #self.passive: - # the source robot needs to be first initialized to the target object state and target robot pose - # before the target robot can start executing the policy - # receive target object state and target robot pose from target robot - if self.conn is not None: - data = self.conn.recv(4096) - target_env_robot_state = pickle.loads(data) - # print("Receiving target object state and target robot pose from target robot") - - self.set_object_state(set_to_target_object_state=target_env_robot_state.object_state) - self.drive_robot_to_target_pose(target_env_robot_state.robot_pose) - - # tell the target robot that the source robot is ready - # Create an instance of Data() to send to client. - variable = Data() - variable.message = "Ready" - # Pickle the object and send it to the server - data_string = pickle.dumps(variable) - self.conn.send(data_string) - else: - # the source robot is ready to execute the policy - if self.conn is not None: - # tell the target robot that the source robot is ready - # Create an instance of Data() to send to client. - variable = Data() - variable.object_state = self.get_object_state() - variable.robot_pose = self.compute_eef_pose() - variable.message = "Ready" - # Pickle the object and send it to the server - data_string = pickle.dumps(variable) - message_length = struct.pack("!I", len(data_string)) - self.conn.send(message_length) - self.conn.send(data_string) - # confirm that the target robot is ready - pickled_message_size = self._receive_all_bytes(4) - message_size = struct.unpack("!I", pickled_message_size)[0] - data = self._receive_all_bytes(message_size) - target_env_robot_state = pickle.loads(data) - # print("Receiving target object state and target robot pose from target robot") - assert target_env_robot_state.message == "Ready", "Target robot is not ready" + # the source robot is ready to execute the policy + if self.conn is not None: + # tell the target robot that the source robot is ready + # Create an instance of Data() to send to client. 
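+            # Wire format: a 4-byte big-endian length header (struct.pack("!I", ...))
+            # followed by the pickled payload, so the peer can read exact byte counts.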
+ variable = Data() + variable.object_state = self.get_object_state() + variable.robot_pose = self.compute_eef_pose() + variable.message = "Ready" + # Pickle the object and send it to the server + data_string = pickle.dumps(variable) + message_length = struct.pack("!I", len(data_string)) + self.conn.send(message_length) + self.conn.send(data_string) + # confirm that the target robot is ready + pickled_message_size = self._receive_all_bytes(4) + message_size = struct.unpack("!I", pickled_message_size)[0] + data = self._receive_all_bytes(message_size) + target_env_robot_state = pickle.loads(data) + # print("Receiving target object state and target robot pose from target robot") + assert target_env_robot_state.message == "Ready", "Target robot is not ready" # Save the ground truth so that the target robot can use it even before it does its first step if self.inpaint_enabled: @@ -730,26 +671,13 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o # obs_copy['robot0_gripper_qpos'] = gripper_angles action = self.policy(ob=obs_copy) # get action from policy - gt_action = action.copy() - - # add noise for data collection - # if self.save_paired_images: - # # with probability 0.5, add noise to the action - # if np.random.rand() < 0.5: - # print(action) - # # std in the translation components: 0.6, std in the rotation components: 0.5 - # action[:3] += np.random.normal(0, 0.6, 3) - # action[3:6] += np.random.normal(0, 0.5, 3) - # # for gripper, 50% of the time, flip the gripper action - # if np.random.rand() < 0.5: - # action[-1] = 1 - action[-1] - + gt_action = action.copy() if self.inpaint_enabled: # ground truth rgb_img = obs['agentview_image'] rgb_img = rgb_img.transpose(1, 2, 0) - cv2.imwrite("/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/groundtruth.png", cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) * 255) + cv2.imwrite(f"{self.save_stats_path}/groundtruth.png", cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) * 255) # inpainted image # diffusion_model_input = np.load(self.diffusion_model_input_path) inpainted_image = np.load(self.inpainted_img_path, allow_pickle=True) @@ -1139,6 +1067,6 @@ def _receive_all_bytes(self, num_bytes: int) -> bytes: ) args = parser.parse_args() - source_robot = SourceRobot(robot_name=args.robot_name, ckpt_path=args.agent, render=args.render, video_path=args.video_path, rollout_horizon=args.horizon, seed=None, dataset_path=args.dataset_path, passive=args.passive, port=args.port, connection=args.connection, demo_path=args.demo_path, inpaint_enabled=args.inpaint_enabled, save_paired_images=args.save_paired_images, save_paired_images_folder_path=args.save_paired_images_folder_path, forward_dynamics_model_path=args.forward_dynamics_model_path, device=args.device, save_failed_demos=args.save_failed_demos) + source_robot = SourceRobot(robot_name=args.robot_name, ckpt_path=args.agent, render=args.render, video_path=args.video_path, rollout_horizon=args.horizon, seed=None, dataset_path=args.dataset_path, passive=args.passive, port=args.port, connection=args.connection, demo_path=args.demo_path, inpaint_enabled=args.inpaint_enabled, save_paired_images=args.save_paired_images, save_paired_images_folder_path=args.save_paired_images_folder_path, forward_dynamics_model_path=args.forward_dynamics_model_path, device=args.device, save_failed_demos=args.save_failed_demos, save_stats_path=args.save_stats_path) source_robot.run_experiments(seeds=args.seeds, rollout_num_episodes=args.n_rollouts, video_skip=args.video_skip, 
camera_names=args.camera_names, dataset_obs=args.dataset_obs, save_stats_path=args.save_stats_path, tracking_error_threshold=args.tracking_error_threshold, num_iter_max=args.num_iter_max, inpaint_online_eval=args.inpaint_enabled) diff --git a/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py b/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py index 4da024d..b77c6f1 100644 --- a/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py +++ b/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py @@ -1,62 +1,16 @@ - - -""" -# Mode 1: Target robot following the source robot -python evaluate_policy_demo_target_robot_client.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --horizon 400 --seed 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_2.mp4 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --passive --connection - -# Mode 2: Target robot querying the source robot for actions -python evaluate_policy_demo_target_robot_client.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --horizon 400 --seed 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_2.mp4 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --connection - -# Mode 3: Demonstration playback -python evaluate_policy_demo_target_robot_client.py --n_rollouts 100 --horizon 400 --seeds 0 --save_stats_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/demo_data_analysis/lift_lowdim_target_sawyer_0.015_300.txt --connection --demo_path /home/kdharmarajan/x-embody/robomimic/datasets/lift/mh/demo_v141.hdf5 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --passive --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_demo_playback_2.mp4 - -# Mode 4: Target robot querying source robot policy based on inpainted images -# 84x84 images -python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target.mp4 --inpaint_enabled --offline_eval - -# 256x256 images -python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject_256/20231219024711/models/model_epoch_200_Lift_success_1.0.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target.mp4 --inpaint_enabled --delta_action --use_diffusion --diffusion_input masked/analytic/target_robot --use_ros --offline_eval - -# Forward dynamics evaluation -python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path 
diff --git a/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py b/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py
index 4da024d..b77c6f1 100644
--- a/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py
+++ b/xembody/xembody_robosuite/evaluate_policy_demo_target_robot_client.py
@@ -1,62 +1,16 @@
-
-
-"""
-# Mode 1: Target robot following the source robot
-python evaluate_policy_demo_target_robot_client.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --horizon 400 --seed 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_2.mp4 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --passive --connection
-
-# Mode 2: Target robot querying the source robot for actions
-python evaluate_policy_demo_target_robot_client.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --n_rollouts 1 --horizon 400 --seed 0 --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_2.mp4 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --connection
-
-# Mode 3: Demonstration playback
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 100 --horizon 400 --seeds 0 --save_stats_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/demo_data_analysis/lift_lowdim_target_sawyer_0.015_300.txt --connection --demo_path /home/kdharmarajan/x-embody/robomimic/datasets/lift/mh/demo_v141.hdf5 --tracking_error_threshold 0.015 --num_iter_max 300 --robot_name Sawyer --passive --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_demo_playback_2.mp4
-
-# Mode 4: Target robot querying source robot policy based on inpainted images
-# 84x84 images
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target.mp4 --inpaint_enabled --offline_eval
-
-# 256x256 images
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject_256/20231219024711/models/model_epoch_200_Lift_success_1.0.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target.mp4 --inpaint_enabled --delta_action --use_diffusion --diffusion_input masked/analytic/target_robot --use_ros --offline_eval
-
-# Forward dynamics evaluation
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject/20231203003603/models/model_epoch_350_Lift_success_0.98.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target.mp4 --inpaint_enabled
-
-# Mode 4 Sample Demo for 1
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 100 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/bc_trained_models/vanilla_bc_img_agentview_noobject_dataaug/20231209150046/models/model_epoch_200_Lift_success_1.0.pth --tracking_error_threshold 0.02 --num_iter_max 300 --robot_name UR5e --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_inpaint_target_diffusion.mp4 --inpaint_enabled
-
-
-# Mode 5: Generate paired dataset for diffusion model
-python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 --connection --port 30210 --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_low_dim_epoch_1000_succ_100.pth --tracking_error_threshold 0.02 --num_iter_max 200 --robot_name UR5e --save_paired_images --save_paired_images_folder_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/diffusion_model_data/success_trajs_withpose
-
-# Mode 6: Collect demonstration for UR5
-python evaluate_policy_demo_target_robot_client.py --agent /home/kdharmarajan/x-embody/robomimic/pretrained_models/lift_ph_image_epoch_500_succ_100.pth --n_rollouts 5 --horizon 400 --seed 0 --tracking_error_threshold 0.03 --num_iter_max 1 --robot_name UR5e --connection --video_path /home/kdharmarajan/x-embody/robosuite/collected_data/output_lift_low_dim_2.mp4 --dataset_path /home/kdharmarajan/x-embody/xembody/xembody_robosuite/target_robot_demonstration_data/lift_ur5e_5.hdf5 --dataset_obs
-"""
-
-
-
 from PIL import Image
 import argparse
-import json
-import h5py
-import imageio
 import struct
 import numpy as np
 from copy import deepcopy
 import socket, pickle
-from scipy.spatial.transform import Rotation
-import torch
 import time
 import cv2
 import os
-import matplotlib.pyplot as plt
-import robomimic
-import robomimic.utils.file_utils as FileUtils
-import robomimic.utils.torch_utils as TorchUtils
 import robomimic.utils.tensor_utils as TensorUtils
 import robomimic.utils.obs_utils as ObsUtils
 from robomimic.envs.env_base import EnvBase
-from robomimic.algo import RolloutPolicy
 import robosuite.utils.transform_utils as T
-from robosuite.utils.mjcf_utils import array_to_string, string_to_array
 import robosuite.utils.camera_utils as camera_utils
 
 from evaluate_policy_demo_source_robot_server import Data, Robot
@@ -64,15 +18,14 @@
 
 class TargetRobot(Robot):
 
-    def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, connection=None, port = 50007, passive=False, demo_path=None, inpaint_enabled=False, offline_eval=False, save_paired_images=False, save_paired_images_folder_path=None, use_diffusion=False, use_ros=False, diffusion_input=None, device=None, save_failed_demos=False, gripper_types=None, naive=None):
-        super().__init__(robot_name=robot_name, ckpt_path=ckpt_path, render=render, video_path=video_path, rollout_horizon=rollout_horizon, seed=seed, dataset_path=dataset_path, demo_path=demo_path, inpaint_enabled=inpaint_enabled, save_paired_images=save_paired_images, save_paired_images_folder_path=save_paired_images_folder_path, device=device, save_failed_demos=save_failed_demos, gripper_types=gripper_types)
+    def __init__(self, robot_name=None, ckpt_path=None, render=False, video_path=None, rollout_horizon=None, seed=None, dataset_path=None, connection=None, port = 50007, passive=False, demo_path=None, inpaint_enabled=False, offline_eval=False, save_paired_images=False, save_paired_images_folder_path=None, use_diffusion=False, use_ros=False, diffusion_input=None, device=None, save_failed_demos=False, gripper_types=None, naive=None, save_stats_path=None):
+        super().__init__(robot_name=robot_name, ckpt_path=ckpt_path, render=render, video_path=video_path, rollout_horizon=rollout_horizon, seed=seed, dataset_path=dataset_path, demo_path=demo_path, inpaint_enabled=inpaint_enabled, save_paired_images=save_paired_images, save_paired_images_folder_path=save_paired_images_folder_path, device=device, save_failed_demos=save_failed_demos, gripper_types=gripper_types, save_stats_path=save_stats_path)
         if connection:
             HOST = 'localhost'
             PORT = port
             self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             self.s.connect((HOST, PORT))
-            # self.s.settimeout(3)
         else:
             self.s = None
 
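The constructor change above just threads the new save_stats_path through to the Robot base class so that debug artifacts land next to the experiment output instead of in a hardcoded home directory. One thing to note: the debug writers in this patch build paths as f"{self.save_stats_path}/rgb.png", i.e. they treat save_stats_path as a directory, not as the .txt stats file used in the deleted usage examples. A hypothetical construction call under those assumptions (checkpoint and output paths are placeholders, not from this repo):

    # Sketch: wiring a TargetRobot to a source server already listening on port 30210.
    target = TargetRobot(
        robot_name="UR5e",
        ckpt_path="model.pth",          # placeholder checkpoint
        connection=True,
        port=30210,
        inpaint_enabled=True,
        save_stats_path="results/lift_ur5e",  # debug PNGs are written under this directory
    )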
@@ -135,45 +88,27 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
             os.makedirs(os.path.join(self.save_paired_images_folder_path, "{}_joint_gripper_pose".format(self.robot_name.lower()), str(demo_index)), exist_ok=True)
             os.makedirs(os.path.join(self.save_paired_images_folder_path, "{}_depth".format(self.robot_name.lower()), str(demo_index)), exist_ok=True)
 
-        if True: #self.passive:
-            # the target robot needs to be first initialized to the source object state and source robot pose
-            # receive source object state and source robot pose from source robot
-            if self.s is not None:
-                pickled_message_size = self._receive_all_bytes(4)
-                message_size = struct.unpack("!I", pickled_message_size)[0]
-                data = self._receive_all_bytes(message_size)
-                source_env_robot_state = pickle.loads(data)
-                print("Receiving source object state and source robot pose from source robot")
-                assert source_env_robot_state.message == "Ready"
-                self.set_object_state(set_to_target_object_state=source_env_robot_state.object_state)
-                self.drive_robot_to_target_pose(source_env_robot_state.robot_pose)
-
-                # tell the source robot that the target robot is ready
-                # Create an instance of Data() to send to client.
-                variable = Data()
-                variable.message = "Ready"
-                # Pickle the object and send it to the server
-                data_string = pickle.dumps(variable)
-                message_length = struct.pack("!I", len(data_string))
-                self.s.send(message_length)
-                self.s.send(data_string)
-        else:
-            # the target robot is ready to execute the policy
-            if self.s is not None:
-                # tell the source robot that the target robot is ready
-                # Create an instance of Data() to send to client.
-                variable = Data()
-                variable.object_state = self.get_object_state()
-                variable.robot_pose = self.compute_eef_pose()
-                variable.message = "Ready"
-                # Pickle the object and send it to the server
-                data_string = pickle.dumps(variable)
-                self.s.send(data_string)
-
-                # confirm that the source robot is ready
-                data = self.s.recv(4096)
-                source_env_robot_state = pickle.loads(data)
-                assert source_env_robot_state.message == "Ready", "The source robot is not ready"
+        # the target robot needs to be first initialized to the source object state and source robot pose
+        # receive source object state and source robot pose from source robot
+        if self.s is not None:
+            pickled_message_size = self._receive_all_bytes(4)
+            message_size = struct.unpack("!I", pickled_message_size)[0]
+            data = self._receive_all_bytes(message_size)
+            source_env_robot_state = pickle.loads(data)
+            print("Receiving source object state and source robot pose from source robot")
+            assert source_env_robot_state.message == "Ready"
+            self.set_object_state(set_to_target_object_state=source_env_robot_state.object_state)
+            self.drive_robot_to_target_pose(source_env_robot_state.robot_pose)
+
+            # tell the source robot that the target robot is ready
+            # Create an instance of Data() to send to the source robot.
+            variable = Data()
+            variable.message = "Ready"
+            # Pickle the object and send it to the server
+            data_string = pickle.dumps(variable)
+            message_length = struct.pack("!I", len(data_string))
+            self.s.send(message_length)
+            self.s.send(data_string)
 
         video_count = 0  # video frame counter
         total_reward = 0.
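With the dead else branch gone, every rollout now begins with the same three-step handshake regardless of passive mode: receive the source state, mirror it, acknowledge. A minimal sketch of that sequence, reusing the hypothetical send_msg/recv_msg framing helpers sketched earlier (the helper names and the free function form are assumptions, not code from this file):

    # Sketch of the target-side initialization handshake.
    def initialize_from_source(sock, robot):
        state = recv_msg(sock)                 # blocks until the source sends its state
        assert state.message == "Ready"
        robot.set_object_state(set_to_target_object_state=state.object_state)
        robot.drive_robot_to_target_pose(state.robot_pose)
        reply = Data()
        reply.message = "Ready"                # acknowledge so the source can take its first step
        send_msg(sock, reply)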
@@ -259,8 +194,7 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
                     }
                 }
 
-                cv2.imwrite("/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/rgb.png", cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) * 255)
-                # cv2.imwrite("/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/rgb.png", cv2.cvtColor(np.array(Image.fromarray(ros_rgb_img).resize((84, 84))).astype(np.float32) / 255.0, cv2.COLOR_RGB2BGR) * 255)
+                cv2.imwrite(f"{self.save_stats_path}/rgb.png", cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) * 255)
 
                 if not self.offline_eval:
                     inpainted_image = np.zeros((256, 256, 3), dtype=np.uint8)
@@ -279,7 +213,6 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
                     inpainted_image = self.ros_inpaint_publisher.get_inpainted_image(True)
                     inpainted_image = inpainted_image.astype(np.float32) / 255.0
                     print("Received inpainted image")
-                    # inpainted_image = cv2.cvtColor(inpainted_image, cv2.COLOR_BGR2RGB)
 
                     if self.naive:
                         inpainted_image = rgb_img
@@ -302,13 +235,10 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
                         segmentation_mask_source_robot = source_robot_info["ground_truth"]["segmentation_mask"]
                         masked_image = mask_rgb_image(rgb_img, segmentation_mask_target_robot, segmentation_mask_source_robot)
                         diffusion_input = masked_image.copy()
-                        cv2.imwrite("/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/masked_image.png", cv2.cvtColor(masked_image, cv2.COLOR_RGB2BGR) * 255)
+                        cv2.imwrite(f"{self.save_stats_path}/masked_image.png", cv2.cvtColor(masked_image, cv2.COLOR_RGB2BGR) * 255)
                         inpainted_image_256, inpainted_image_84 = self.controlnet.inpaint(masked_image)
                         inpainted_image = inpainted_image_256
-                    else:
-                        # inpainted_image = plt.imread(f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/data/diffusion/inpainted_image_{step_i}.png")
-                        inpainted_image = plt.imread(f"/home/kdharmarajan/x-embody/xembody/xembody_robosuite/image_inpainting/data/results_color_threshold_skimage4/inpaint0.png")
 
                 np.save(self.inpainted_img_path, inpainted_image, allow_pickle=True)
                 if self.use_diffusion:
                     np.save(self.diffusion_model_input_path, diffusion_input, allow_pickle=True)
@@ -322,12 +252,10 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
                 message_length = struct.pack("!I", len(data_string))
                 self.s.send(message_length)
                 self.s.send(data_string)
-                # time.sleep(0.1) # if we don't sleep, the target robot will not recieve the message
 
             # receive target object state and target robot pose from target robot
             if self.s is not None:
-                # breakpoint()
                 pickled_message_size = self._receive_all_bytes(4)
                 message_size = struct.unpack("!I", pickled_message_size)[0]
                 data = self._receive_all_bytes(message_size)
@@ -338,16 +266,10 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
                 timestep_info_dict["source_robot"] = np.load(self.groundtruth_and_inpaintedprediction_path, allow_pickle=True).item()
             if source_env_robot_state.done:
                 print("Source robot is done")
-                # break
             if source_env_robot_state.success:
                 print("Source robot is successful")
                 if source_finished_step is None:
                     source_finished_step = step_i
-                # break
-            # if set_object_state:
-            #     self.set_object_state(set_to_target_object_state=source_env_robot_state.object_state)
-            # if set_robot_pose:
-            #     self.drive_robot_to_target_pose(source_env_robot_state.robot_pose)
 
             if target_robot_delta_action:
                 # print("Executing delta action")
@@ -500,10 +422,6 @@ def rollout_robot(self, video_skip=5, return_obs=False, camera_names=None, set_o
             if self.use_demo:
                 print("Source robot is done. Target robot is not successful. Demo exhausted")
                 break
-        # except:
-        #     if self.env.rollout_exceptions:
-        #         print("WARNING: got target robot rollout exception {}".format(self.env.rollout_exceptions))
-
         stats = dict(Return=total_reward, Horizon=(step_i + 1), Success_Rate=float(has_succeeded))
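mask_rgb_image, called in the diffusion branch above and named in the hunk header below, merges the target and source robot segmentation masks to blank out robot pixels before the image is handed to the diffusion model. Its body is not part of this patch, so the following is only a plausible sketch that zeroes the union of both masks (the fill value and mask dtypes are assumptions):

    import numpy as np

    def mask_rgb_image_sketch(rgb_image, mask1, mask2):
        # Assumed behavior: erase every pixel belonging to either robot so the
        # diffusion model is forced to inpaint that region. Zero fill is a guess.
        combined = np.logical_or(mask1.astype(bool), mask2.astype(bool)).squeeze()
        masked = rgb_image.copy()
        masked[combined] = 0.0
        return masked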
@@ -765,6 +683,6 @@ def mask_rgb_image(rgb_image, mask1, mask2):
         time.sleep(4)  # wait for the server to start
 
-    target_robot = TargetRobot(robot_name=args.robot_name, ckpt_path=args.agent, render=args.render, video_path=args.video_path, rollout_horizon=args.horizon, dataset_path=args.dataset_path, passive=args.passive, port=args.port, connection=args.connection, demo_path=args.demo_path, inpaint_enabled=args.inpaint_enabled, offline_eval=args.offline_eval, save_paired_images=args.save_paired_images, save_paired_images_folder_path=args.save_paired_images_folder_path, use_diffusion=args.use_diffusion, use_ros=args.use_ros, diffusion_input=args.diffusion_input, device=args.device, save_failed_demos=args.save_failed_demos, gripper_types=args.gripper, naive=args.naive)
+    target_robot = TargetRobot(robot_name=args.robot_name, ckpt_path=args.agent, render=args.render, video_path=args.video_path, rollout_horizon=args.horizon, dataset_path=args.dataset_path, passive=args.passive, port=args.port, connection=args.connection, demo_path=args.demo_path, inpaint_enabled=args.inpaint_enabled, offline_eval=args.offline_eval, save_paired_images=args.save_paired_images, save_paired_images_folder_path=args.save_paired_images_folder_path, use_diffusion=args.use_diffusion, use_ros=args.use_ros, diffusion_input=args.diffusion_input, device=args.device, save_failed_demos=args.save_failed_demos, gripper_types=args.gripper, naive=args.naive, save_stats_path=args.save_stats_path)
     target_robot.run_experiments(seeds=args.seeds, rollout_num_episodes=args.n_rollouts, video_skip=args.video_skip,
                                  camera_names=args.camera_names, dataset_obs=args.dataset_obs, save_stats_path=args.save_stats_path, tracking_error_threshold=args.tracking_error_threshold, num_iter_max=args.num_iter_max, target_robot_delta_action=args.delta_action, inpaint_online_eval=not target_robot.offline_eval)
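With save_stats_path plumbed through both entry points, a paired source/target run no longer depends on a hardcoded home directory. An illustrative invocation of the client under that assumption (the checkpoint and output directory are placeholders; the remaining flags all appear in the usage examples deleted above):

    python evaluate_policy_demo_target_robot_client.py --n_rollouts 1 --horizon 400 --seeds 0 \
        --connection --port 30210 --agent model.pth --robot_name UR5e \
        --inpaint_enabled --save_stats_path results/lift_ur5e

Since the new debug writers append filenames like rgb.png and masked_image.png directly to this value, pass a directory here rather than a .txt stats file.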